From 19dcf09d1d63f1abda1ff3265c9187a9157543d8 Mon Sep 17 00:00:00 2001 From: Veronica Wasson <3992422+VeronicaWasson@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:39:31 -0700 Subject: [PATCH 01/66] docs(samples): Add Dataflow snippet for reading from Cloud Storage (#9568) --- .../com/example/dataflow/ReadFromStorage.java | 61 ++++++++++++ .../com/example/dataflow/ApacheIcebergIT.java | 7 +- .../com/example/dataflow/BigQueryWriteIT.java | 7 +- .../com/example/dataflow/BiqQueryReadIT.java | 7 +- .../com/example/dataflow/PubSubWriteIT.java | 7 +- .../example/dataflow/ReadFromStorageIT.java | 93 +++++++++++++++++++ 6 files changed, 166 insertions(+), 16 deletions(-) create mode 100644 dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java create mode 100644 dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java diff --git a/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java b/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java new file mode 100644 index 00000000000..4554466205f --- /dev/null +++ b/dataflow/snippets/src/main/java/com/example/dataflow/ReadFromStorage.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.dataflow; + +// [START dataflow_read_from_cloud_storage] +import org.apache.beam.sdk.Pipeline; +import org.apache.beam.sdk.PipelineResult; +import org.apache.beam.sdk.io.TextIO; +import org.apache.beam.sdk.options.Description; +import org.apache.beam.sdk.options.PipelineOptions; +import org.apache.beam.sdk.options.PipelineOptionsFactory; +import org.apache.beam.sdk.transforms.MapElements; +import org.apache.beam.sdk.values.TypeDescriptors; + +public class ReadFromStorage { + // [END dataflow_read_from_cloud_storage] + public interface Options extends PipelineOptions { + @Description("The Cloud Storage bucket to read from") + String getBucket(); + + void setBucket(String value); + } + + public static PipelineResult.State main(String[] args) { + var options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class); + Pipeline pipeline = createPipeline(options); + return pipeline.run().waitUntilFinish(); + } + + // [START dataflow_read_from_cloud_storage] + public static Pipeline createPipeline(Options options) { + var pipeline = Pipeline.create(options); + pipeline + // Read from a text file. 
+ .apply(TextIO.read().from( + "gs://" + options.getBucket() + "/*.txt")) + .apply( + MapElements.into(TypeDescriptors.strings()) + .via( + (x -> { + System.out.println(x); + return x; + }))); + return pipeline; + } +} +// [END dataflow_read_from_cloud_storage] diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java index 0a6e0149b65..1c7cfcbe213 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/ApacheIcebergIT.java @@ -53,7 +53,7 @@ public class ApacheIcebergIT { private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private static final String CATALOG_NAME = "local"; private static final String TABLE_NAME = "table1"; @@ -112,8 +112,7 @@ private void writeTableRecord() @Before public void setUp() throws IOException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); // Create an Apache Iceberg catalog with a table. 
warehouseDirectory = Files.createTempDirectory("test-warehouse"); @@ -131,7 +130,7 @@ public void setUp() throws IOException { @After public void tearDown() throws IOException { Files.deleteIfExists(Paths.get(OUTPUT_FILE_NAME)); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java index 983d0c3e92e..e785010f961 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/BigQueryWriteIT.java @@ -47,7 +47,7 @@ public class BigQueryWriteIT { private static final String projectId = System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private BigQuery bigquery; private String datasetName; private String tableName; @@ -65,8 +65,7 @@ private void createTable() { @Before public void setUp() throws InterruptedException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); bigquery = BigQueryOptions.getDefaultInstance().getService(); @@ -79,7 +78,7 @@ public void setUp() throws InterruptedException { public void tearDown() { bigquery.delete( DatasetId.of(projectId, datasetName), DatasetDeleteOption.deleteContents()); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java index d4864f72c1e..837c1687726 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/BiqQueryReadIT.java @@ -45,7 +45,7 @@ public class BiqQueryReadIT { private static final String projectId = 
System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private BigQuery bigquery; private String datasetName; private String tableName; @@ -53,8 +53,7 @@ public class BiqQueryReadIT { @Before public void setUp() throws InterruptedException { bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); bigquery = BigQueryOptions.getDefaultInstance().getService(); @@ -81,7 +80,7 @@ public void setUp() throws InterruptedException { public void tearDown() { bigquery.delete( DatasetId.of(projectId, datasetName), DatasetDeleteOption.deleteContents()); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java index 19c4771e91d..fb82ae54543 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/PubSubWriteIT.java @@ -47,7 +47,7 @@ public class PubSubWriteIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private ByteArrayOutputStream bout; - private PrintStream out; + private final PrintStream originalOut = System.out; private String topicId; private String subscriptionId; TopicAdminClient topicAdminClient; @@ -64,8 +64,7 @@ public void setUp() throws Exception { requireEnvVar("GOOGLE_CLOUD_PROJECT"); bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); + System.setOut(new PrintStream(bout)); topicId = "test_topic_" + UUID.randomUUID().toString().substring(0, 8); subscriptionId = topicId + "-sub"; @@ -84,7 +83,7 @@ public void setUp() throws Exception { public void tearDown() { subscriptionAdminClient.deleteSubscription(SubscriptionName.of(PROJECT_ID, subscriptionId)); 
topicAdminClient.deleteTopic(TopicName.of(PROJECT_ID, topicId)); - System.setOut(null); + System.setOut(originalOut); } @Test diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java new file mode 100644 index 00000000000..d4e656b8264 --- /dev/null +++ b/dataflow/snippets/src/test/java/com/example/dataflow/ReadFromStorageIT.java @@ -0,0 +1,93 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.dataflow; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.testing.RemoteStorageHelper; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.apache.beam.sdk.PipelineResult; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ReadFromStorageIT { + + private static final String projectId = System.getenv("GOOGLE_CLOUD_PROJECT"); + + private ByteArrayOutputStream bout; + private final PrintStream originalout = System.out; + + String bucketName; + Storage storage; + + private static final String[] lines = {"line 1", "line 2"}; + + @Before + public void setUp() { + // Redirect System.err to capture logs. + bout = new ByteArrayOutputStream(); + System.setOut(new PrintStream(bout)); + + // Create a Cloud Storage bucket with a text file. 
+ RemoteStorageHelper helper = RemoteStorageHelper.create(); + storage = helper.getOptions().getService(); + bucketName = RemoteStorageHelper.generateBucketName(); + storage.create(BucketInfo.of(bucketName)); + + String objectName = "file1.txt"; + String contents = String.format("%s\n%s\n", lines[0], lines[1]); + + BlobId blobId = BlobId.of(bucketName, objectName); + BlobInfo blobInfo = BlobInfo.newBuilder(blobId).build(); + byte[] content = contents.getBytes(StandardCharsets.UTF_8); + + storage.create(blobInfo, content); + } + + @After + public void tearDown() throws ExecutionException, InterruptedException { + RemoteStorageHelper.forceDelete(storage, bucketName, 5, TimeUnit.SECONDS); + + System.setOut(originalout); + bout.reset(); + } + + @Test + public void readFromStorage_shouldReadFile() throws Exception { + + PipelineResult.State state = ReadFromStorage.main( + new String[] {"--runner=DirectRunner", "--bucket=" + bucketName}); + assertEquals(PipelineResult.State.DONE, state); + + String got = bout.toString(); + assertTrue(got.contains(lines[0])); + assertTrue(got.contains(lines[1])); + } +} From 002b4ae5a17babe61d2ec78ad47a6dc336fdd6d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Tue, 29 Oct 2024 20:58:59 +0100 Subject: [PATCH 02/66] feat(tpu): add tpu queued resources network (#9605) * Added tpu_queued_resources_network sample * Changed CODEOWNERS * Update CreateQueuedResourceWithNetwork.java --------- Co-authored-by: Eric Schmidt --- .github/CODEOWNERS | 1 + tpu/pom.xml | 101 +++++++++++++ .../tpu/CreateQueuedResourceWithNetwork.java | 139 ++++++++++++++++++ .../java/tpu/DeleteForceQueuedResource.java | 78 ++++++++++ tpu/src/main/java/tpu/GetQueuedResource.java | 54 +++++++ .../CreateQueuedResourceWithNetworkIT.java | 79 ++++++++++ tpu/src/test/java/tpu/Util.java | 87 +++++++++++ 7 files changed, 539 
insertions(+) create mode 100644 tpu/pom.xml create mode 100644 tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java create mode 100644 tpu/src/main/java/tpu/DeleteForceQueuedResource.java create mode 100644 tpu/src/main/java/tpu/GetQueuedResource.java create mode 100644 tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java create mode 100644 tpu/src/test/java/tpu/Util.java diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ca79aba529f..f21afe0f659 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -44,6 +44,7 @@ /security-command-center @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra @GoogleCloudPlatform/gcp-security-command-center /servicedirectory @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra /webrisk @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra +/tpu @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/dee-infra # DEE Platform Ops (DEEPO) /errorreporting @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers diff --git a/tpu/pom.xml b/tpu/pom.xml new file mode 100644 index 00000000000..601db56977d --- /dev/null +++ b/tpu/pom.xml @@ -0,0 +1,101 @@ + + + + 4.0.0 + com.example.tpu + gce-diregapic-samples + 1.0-SNAPSHOT + + + + shared-configuration + com.google.cloud.samples + 1.2.0 + + + + 11 + 11 + + + + + com.google.cloud + google-cloud-tpu + 2.52.0 + + + + com.google.api + gax + + + + + google-cloud-storage + com.google.cloud + test + + + + truth + com.google.truth + test + 1.4.0 + + + junit + junit + test + 4.13.2 + + + + + org.junit.jupiter + junit-jupiter-engine + 5.10.2 + test + + + org.mockito + mockito-core + 5.13.0 + test + + + + + + + 
libraries-bom + com.google.cloud + import + pom + 26.40.0 + + + + + \ No newline at end of file diff --git a/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java new file mode 100644 index 00000000000..de9aa884aac --- /dev/null +++ b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java @@ -0,0 +1,139 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_queued_resources_network] +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.NetworkConfig; +import com.google.cloud.tpu.v2alpha1.Node; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import com.google.cloud.tpu.v2alpha1.TpuSettings; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.threeten.bp.Duration; + +public class CreateQueuedResourceWithNetwork { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. 
+ // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "europe-west4-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + // The accelerator type that specifies the version and size of the Cloud TPU you want to create. + // For more information about supported accelerator types for each TPU version, + // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions. + String tpuType = "v2-8"; + // Software version that specifies the version of the TPU runtime to install. + // For more information see https://cloud.google.com/tpu/docs/runtimes + String tpuSoftwareVersion = "tpu-vm-tf-2.14.1"; + // The name for your Queued Resource. + String queuedResourceId = "QUEUED_RESOURCE_ID"; + // The name of the network you want the node to connect to. + // The network should be assigned to your project. + String networkName = "YOUR_COMPUTE_TPU_NETWORK"; + + createQueuedResourceWithNetwork(projectId, zone, queuedResourceId, nodeName, + tpuType, tpuSoftwareVersion, networkName); + } + + // Creates a Queued Resource with network configuration. 
+ public static QueuedResource createQueuedResourceWithNetwork( + String projectId, String zone, String queuedResourceId, String nodeName, + String tpuType, String tpuSoftwareVersion, String networkName) + throws IOException, ExecutionException, InterruptedException { + // With these settings the client library handles the Operation's polling mechanism + // and prevent CancellationException error + TpuSettings.Builder clientSettings = + TpuSettings.newBuilder(); + clientSettings + .createQueuedResourceSettings() + .setRetrySettings( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(2.0) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setTotalTimeout(Duration.ofHours(24L)) + .build()); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + String region = zone.substring(0, zone.length() - 2); + + // Specify the network and subnetwork that you want to connect your TPU to. 
+ NetworkConfig networkConfig = + NetworkConfig.newBuilder() + .setEnableExternalIps(true) + .setNetwork(String.format("projects/%s/global/networks/%s", projectId, networkName)) + .setSubnetwork( + String.format( + "projects/%s/regions/%s/subnetworks/%s", projectId, region, networkName)) + .build(); + + // Create a node + Node node = + Node.newBuilder() + .setName(nodeName) + .setAcceleratorType(tpuType) + .setRuntimeVersion(tpuSoftwareVersion) + .setNetworkConfig(networkConfig) + .setQueuedResource( + String.format( + "projects/%s/locations/%s/queuedResources/%s", + projectId, zone, queuedResourceId)) + .build(); + + // Create queued resource + QueuedResource queuedResource = + QueuedResource.newBuilder() + .setName(queuedResourceId) + .setTpu( + QueuedResource.Tpu.newBuilder() + .addNodeSpec( + QueuedResource.Tpu.NodeSpec.newBuilder() + .setParent(parent) + .setNode(node) + .setNodeId(nodeName) + .build()) + .build()) + .build(); + + CreateQueuedResourceRequest request = + CreateQueuedResourceRequest.newBuilder() + .setParent(parent) + .setQueuedResource(queuedResource) + .setQueuedResourceId(queuedResourceId) + .build(); + + QueuedResource response = tpuClient.createQueuedResourceAsync(request).get(); + // You can wait until TPU Node is READY, + // and check its status using getTpuVm() from "tpu_vm_get" sample. + System.out.println("Queued Resource created: " + queuedResourceId); + return response; + } + } +} +//[END tpu_queued_resources_network] diff --git a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java new file mode 100644 index 00000000000..ed499d8cac5 --- /dev/null +++ b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java @@ -0,0 +1,78 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_queued_resources_delete_force] + +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.UnknownException; +import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import com.google.cloud.tpu.v2alpha1.TpuSettings; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.threeten.bp.Duration; + +public class DeleteForceQueuedResource { + public static void main(String[] args) { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which the TPU was created. + String zone = "europe-west4-a"; + // The name for your Queued Resource. + String queuedResourceId = "QUEUED_RESOURCE_ID"; + + deleteForceQueuedResource(projectId, zone, queuedResourceId); + } + + // Deletes a Queued Resource asynchronously with --force flag. 
+ public static void deleteForceQueuedResource( + String projectId, String zone, String queuedResourceId) { + String name = String.format("projects/%s/locations/%s/queuedResources/%s", + projectId, zone, queuedResourceId); + // With these settings the client library handles the Operation's polling mechanism + // and prevent CancellationException error + TpuSettings.Builder clientSettings = + TpuSettings.newBuilder(); + clientSettings + .deleteQueuedResourceSettings() + .setRetrySettings( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(2.0) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setTotalTimeout(Duration.ofHours(24L)) + .build()); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { + DeleteQueuedResourceRequest request = + DeleteQueuedResourceRequest.newBuilder().setName(name).setForce(true).build(); + + tpuClient.deleteQueuedResourceAsync(request).get(); + + } catch (UnknownException | InterruptedException | ExecutionException | IOException e) { + System.out.println(e.getMessage()); + } + System.out.printf("Deleted Queued Resource: %s\n", name); + } +} +//[END tpu_queued_resources_delete_force] diff --git a/tpu/src/main/java/tpu/GetQueuedResource.java b/tpu/src/main/java/tpu/GetQueuedResource.java new file mode 100644 index 00000000000..3a510e045fe --- /dev/null +++ b/tpu/src/main/java/tpu/GetQueuedResource.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_queued_resources_get] + +import com.google.cloud.tpu.v2alpha1.GetQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import java.io.IOException; + +public class GetQueuedResource { + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which the TPU was created. + String zone = "europe-west4-a"; + // The name for your Queued Resource. + String queuedResourceId = "QUEUED_RESOURCE_ID"; + + getQueuedResource(projectId, zone, queuedResourceId); + } + + // Get a Queued Resource. + public static QueuedResource getQueuedResource( + String projectId, String zone, String queuedResourceId) throws IOException { + String name = String.format("projects/%s/locations/%s/queuedResources/%s", + projectId, zone, queuedResourceId); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (TpuClient tpuClient = TpuClient.create()) { + GetQueuedResourceRequest request = + GetQueuedResourceRequest.newBuilder().setName(name).build(); + + return tpuClient.getQueuedResource(request); + } + } +} +//[END tpu_queued_resources_get] diff --git a/tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java b/tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java new file mode 100644 index 00000000000..8e7f56fb97b --- /dev/null +++ b/tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java @@ -0,0 +1,79 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 6, unit = TimeUnit.MINUTES) +public class CreateQueuedResourceWithNetworkIT { + + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "europe-west4-a"; + static String javaVersion = System.getProperty("java.version").substring(0, 2); + private static final String NODE_NAME = "test-tpu-queued-resource-network-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + private static final String TPU_TYPE = "v2-8"; + private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1"; + private static final String QUEUED_RESOURCE_NAME = "queued-resource-network-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + private static final String NETWORK_NAME = "default"; + + @BeforeAll + public static void setUp() throws IOException { + + // Cleanup existing stale resources. 
+ Util.cleanUpExistingQueuedResources("queued-resource-network-", PROJECT_ID, ZONE); + } + + @AfterAll + public static void cleanup() { + DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME); + + // Test that resource is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> GetQueuedResource.getQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME)); + } + + @Test + public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { + + QueuedResource queuedResource = CreateQueuedResourceWithNetwork.createQueuedResourceWithNetwork( + PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, + TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME); + + assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getName()).isEqualTo(NODE_NAME); + assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getNetworkConfig().getNetwork() + .contains(NETWORK_NAME)); + assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getNetworkConfig().getSubnetwork() + .contains(NETWORK_NAME)); + } +} \ No newline at end of file diff --git a/tpu/src/test/java/tpu/Util.java b/tpu/src/test/java/tpu/Util.java new file mode 100644 index 00000000000..c5d4d839291 --- /dev/null +++ b/tpu/src/test/java/tpu/Util.java @@ -0,0 +1,87 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.TpuClient; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import com.google.protobuf.Timestamp; +import java.io.IOException; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.ExecutionException; + +public class Util { + private static final int DELETION_THRESHOLD_TIME_MINUTES = 30; + + // Delete TPU VMs which starts with the given prefixToDelete and + // has creation timestamp >30 minutes. + public static void cleanUpExistingQueuedResources( + String prefixToDelete, String projectId, String zone) + throws IOException { + try (com.google.cloud.tpu.v2alpha1.TpuClient tpuClient = + com.google.cloud.tpu.v2alpha1.TpuClient.create()) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + + for (QueuedResource queuedResource : tpuClient.listQueuedResources(parent).iterateAll()) { + + com.google.cloud.tpu.v2alpha1.Node node = queuedResource.getTpu().getNodeSpec(0).getNode(); + String creationTime = formatTimestamp(node.getCreateTime()); + String name = queuedResource.getName() + .substring(queuedResource.getName().lastIndexOf("/") + 1); + if (containPrefixToDeleteAndZone(queuedResource, prefixToDelete, zone) + && isCreatedBeforeThresholdTime(creationTime)) { + DeleteForceQueuedResource.deleteForceQueuedResource(projectId, zone, name); + } + } + } + } + + public static boolean containPrefixToDeleteAndZone( + Object resource, String prefixToDelete, String zone) { + boolean containPrefixAndZone = false; + try { + if (resource instanceof Node) { + containPrefixAndZone = ((Node) resource).getName().contains(prefixToDelete) + && ((Node) resource).getName().split("/")[3].contains(zone); + } + if (resource instanceof QueuedResource) { + containPrefixAndZone = ((QueuedResource) 
resource).getName().contains(prefixToDelete) + && ((QueuedResource) resource).getName().split("/")[3].contains(zone); + } + } catch (NullPointerException e) { + System.out.println("Resource not found, skipping deletion:"); + } + return containPrefixAndZone; + } + + public static boolean isCreatedBeforeThresholdTime(String timestamp) { + return OffsetDateTime.parse(timestamp).toInstant() + .isBefore(Instant.now().minus(DELETION_THRESHOLD_TIME_MINUTES, ChronoUnit.MINUTES)); + } + + private static String formatTimestamp(Timestamp timestamp) { + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + OffsetDateTime offsetDateTime = OffsetDateTime.ofInstant( + Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos()), + ZoneOffset.UTC); + return formatter.format(offsetDateTime); + } +} \ No newline at end of file From 5f6521713a95fb05fff51a3ffa9a5e3d6ffcb939 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Wed, 30 Oct 2024 20:12:24 +0100 Subject: [PATCH 03/66] feat(compute): add compute reservation create from vm sample. (#9573) * Implemented compute_reservation_create_from_vm sample. created test * Cleaned reservations * Fixed lint issue * Revert "Fixed lint issue" This reverts commit c758b031c4e85284ab9d1bafdaf275394d9103b1. 
* Reverted changes * Fixed naming * Fixed naming * Fixed naming * Fixed naming * Created new test class for CreateReservationFromVm * Changed zone * Cleaned unused code * Fixed test and naming --- .../reservation/CreateReservationFromVm.java | 132 ++++++++++++++++++ .../CreateReservationFromVmIT.java | 115 +++++++++++++++ .../CrudOperationsReservationIT.java | 4 +- 3 files changed, 249 insertions(+), 2 deletions(-) create mode 100644 compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java create mode 100644 compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java new file mode 100644 index 00000000000..0b004cd19c3 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java @@ -0,0 +1,132 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.reservation; + +// [START compute_reservation_create_from_vm] + +import com.google.cloud.compute.v1.AcceleratorConfig; +import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk; +import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties; +import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.InsertReservationRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.Reservation; +import com.google.cloud.compute.v1.ReservationsClient; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateReservationFromVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // The zone of the VM. In this zone the reservation will be created. + String zone = "us-central1-a"; + // The name of the reservation to create. + String reservationName = "YOUR_RESERVATION_NAME"; + // The name of the VM to create the reservation from. + String vmName = "YOUR_VM_NAME"; + + createComputeReservationFromVm(project, zone, reservationName, vmName); + } + + // Creates a compute reservation from an existing VM. 
+ public static void createComputeReservationFromVm( + String project, String zone, String reservationName, String vmName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (InstancesClient instancesClient = InstancesClient.create(); + ReservationsClient reservationsClient = ReservationsClient.create()) { + Instance existingVm = instancesClient.get(project, zone, vmName); + + // Extract properties from the existing VM + List guestAccelerators = new ArrayList<>(); + if (!existingVm.getGuestAcceleratorsList().isEmpty()) { + for (AcceleratorConfig accelatorConfig : existingVm.getGuestAcceleratorsList()) { + guestAccelerators.add( + AcceleratorConfig.newBuilder() + .setAcceleratorCount(accelatorConfig.getAcceleratorCount()) + .setAcceleratorType(accelatorConfig.getAcceleratorType() + .substring(accelatorConfig.getAcceleratorType().lastIndexOf('/') + 1)) + .build()); + } + } + + List localSsds = + new ArrayList<>(); + if (!existingVm.getDisksList().isEmpty()) { + for (AttachedDisk disk : existingVm.getDisksList()) { + if (disk.getDiskSizeGb() >= 375) { + localSsds.add( + AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk.newBuilder() + .setDiskSizeGb(disk.getDiskSizeGb()) + .setInterface(disk.getInterface()) + .build()); + } + } + } + + AllocationSpecificSKUAllocationReservedInstanceProperties instanceProperties = + AllocationSpecificSKUAllocationReservedInstanceProperties.newBuilder() + .setMachineType( + existingVm.getMachineType() + .substring(existingVm.getMachineType().lastIndexOf('/') + 1)) + .setMinCpuPlatform(existingVm.getMinCpuPlatform()) + .addAllLocalSsds(localSsds) + .addAllGuestAccelerators(guestAccelerators) + .build(); + + Reservation reservation = + Reservation.newBuilder() + .setName(reservationName) + .setSpecificReservation( + 
AllocationSpecificSKUReservation.newBuilder() + .setCount(3) + .setInstanceProperties(instanceProperties) + .build()) + .setSpecificReservationRequired(true) + .build(); + + InsertReservationRequest insertReservationRequest = + InsertReservationRequest.newBuilder() + .setProject(project) + .setZone(zone) + .setReservationResource(reservation) + .build(); + + Operation response = reservationsClient + .insertAsync(insertReservationRequest).get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + System.out.println("Reservation creation failed ! ! " + response); + return; + } + System.out.println("Operation completed successfully."); + } + } +} +// [END compute_reservation_create_from_vm] \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java b/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java new file mode 100644 index 00000000000..e98dd20ba22 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/CreateReservationFromVmIT.java @@ -0,0 +1,115 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.reservation; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.Reservation; +import com.google.cloud.compute.v1.ReservationsClient; +import compute.CreateInstance; +import compute.DeleteInstance; +import compute.Util; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class CreateReservationFromVmIT { + + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-east4-c"; + private static ReservationsClient reservationsClient; + private static InstancesClient instancesClient; + private static String reservationName; + private static String instanceForReservation; + static String javaVersion = System.getProperty("java.version").substring(0, 2); + + // Check if the required environment variables are set. 
+ public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + reservationsClient = ReservationsClient.create(); + instancesClient = InstancesClient.create(); + + reservationName = "test-reservation-from-vm-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + instanceForReservation = "test-instance-for-reserv-" + javaVersion + "-" + + UUID.randomUUID().toString().substring(0, 8); + + // Cleanup existing stale resources. + Util.cleanUpExistingInstances("test-instance-for-reserv-" + javaVersion, PROJECT_ID, ZONE); + Util.cleanUpExistingReservations("test-reservation-from-vm-" + javaVersion, PROJECT_ID, ZONE); + + CreateInstance.createInstance(PROJECT_ID, ZONE, instanceForReservation); + } + + @AfterAll + public static void cleanup() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Delete resources created for testing. 
+ DeleteInstance.deleteInstance(PROJECT_ID, ZONE, instanceForReservation); + + reservationsClient.close(); + instancesClient.close(); + } + + @Test + public void testCreateComputeReservationFromVm() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + CreateReservationFromVm.createComputeReservationFromVm( + PROJECT_ID, ZONE, reservationName, instanceForReservation); + + Instance instance = instancesClient.get(PROJECT_ID, ZONE, instanceForReservation); + Reservation reservation = + reservationsClient.get(PROJECT_ID, ZONE, reservationName); + + Assertions.assertNotNull(reservation); + assertThat(reservation.getName()).isEqualTo(reservationName); + Assertions.assertEquals(instance.getMinCpuPlatform(), + reservation.getSpecificReservation().getInstanceProperties().getMinCpuPlatform()); + Assertions.assertEquals(instance.getGuestAcceleratorsList(), + reservation.getSpecificReservation().getInstanceProperties().getGuestAcceleratorsList()); + + DeleteReservation.deleteReservation(PROJECT_ID, ZONE, reservationName); + + // Test that reservation is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> GetReservation.getReservation(PROJECT_ID, reservationName, ZONE)); + } +} diff --git a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java index 9c5ea9710e4..5dae4a347d6 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java +++ b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java @@ -74,10 +74,10 @@ public static void setUp() @AfterAll public static void cleanup() throws IOException, ExecutionException, InterruptedException, TimeoutException { - // Delete all reservations created for testing. + // Delete resources created for testing. 
DeleteReservation.deleteReservation(PROJECT_ID, ZONE, RESERVATION_NAME); - // Test that reservations are deleted + // Test that reservation is deleted Assertions.assertThrows( NotFoundException.class, () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME, ZONE)); From 3ff39e738b5b2ff6a140d24fa45226f9057dba6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Wed, 30 Oct 2024 20:40:56 +0100 Subject: [PATCH 04/66] feat(compute): add compute instance/instance template not consume reservation samples. (#9564) * Added compute_instance_not_consume_reservation sample and test. * Added timeout * Fixed test * Fixed test * Created ConsumeReservationIT cals, fixed ReservationIT * Fixed ReservationIT * changed zone * changed zone * Added compute_template_not_consume_reservation sample, fixed tests * Added timeout * Fixed Instance creation * Fixed Instance creation * Disabled Instance creation * Fixed code * Fixed comments * Added comments and code * Fixed code following recommendations --- ...teInstanceWithoutConsumingReservation.java | 122 ++++++++++++++++++ .../reservation/CreateReservation.java | 2 +- ...teTemplateWithoutConsumingReservation.java | 110 ++++++++++++++++ .../reservation/ConsumeReservationIT.java | 101 +++++++++++++++ 4 files changed, 334 insertions(+), 1 deletion(-) create mode 100644 compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java create mode 100644 compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java create mode 100644 compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java 
b/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java new file mode 100644 index 00000000000..df278717286 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateInstanceWithoutConsumingReservation.java @@ -0,0 +1,122 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_instance_not_consume_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateInstanceWithoutConsumingReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the 
sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the zone you want to use. + String zone = "us-central1-a"; + // Name of the VM instance you want to query. + String instanceName = "YOUR_INSTANCE_NAME"; + // machineType: machine type of the VM being created. + // * This value uses the format zones/{zone}/machineTypes/{type_name}. + // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types + String machineTypeName = "n1-standard-1"; + // sourceImage: path to the operating system image to mount. + // * For details about images you can mount, see https://cloud.google.com/compute/docs/images + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + // diskSizeGb: storage size of the boot disk to attach to the instance. + long diskSizeGb = 10L; + // networkName: network interface to associate with the instance. + String networkName = "default"; + + createInstanceWithoutConsumingReservationAsync(projectId, zone, instanceName, + machineTypeName, sourceImage, diskSizeGb, networkName); + } + + // Create a virtual machine that explicitly doesn't consume reservations + public static Instance createInstanceWithoutConsumingReservationAsync( + String project, String zone, String instanceName, + String machineTypeName, String sourceImage, long diskSizeGb, String networkName) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (InstancesClient instancesClient = InstancesClient.create()) { + AttachedDisk disk = + AttachedDisk.newBuilder() + .setBoot(true) + .setAutoDelete(true) + .setType(AttachedDisk.Type.PERSISTENT.toString()) + .setDeviceName("disk-1") + .setInitializeParams( + AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskSizeGb(diskSizeGb) + .build()) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName(networkName) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(NO_RESERVATION.toString()) + .build(); + + Instance instanceResource = + Instance.newBuilder() + .setName(instanceName) + .setMachineType(machineType) + .addDisks(disk) + .addNetworkInterfaces(networkInterface) + .setReservationAffinity(reservationAffinity) + .build(); + + InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder() + .setProject(project) + .setZone(zone) + .setInstanceResource(instanceResource) + .build(); + + OperationFuture operation = instancesClient.insertAsync( + insertInstanceRequest); + + // Wait for the operation to complete. 
+ Operation response = operation.get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instancesClient.get(project, zone, instanceName); + } + } +} +// [END compute_instance_not_consume_reservation] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java index 59f9077d448..3ea126d692a 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java @@ -16,6 +16,7 @@ package compute.reservation; +// [START compute_reservation_create] import com.google.cloud.compute.v1.AcceleratorConfig; import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk; import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties; @@ -28,7 +29,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -// [START compute_reservation_create] public class CreateReservation { public static void main(String[] args) diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java new file mode 100644 index 00000000000..2857b3288bd --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateTemplateWithoutConsumingReservation.java @@ -0,0 +1,110 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_template_not_consume_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; + +import com.google.cloud.compute.v1.AccessConfig; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceTemplateRequest; +import com.google.cloud.compute.v1.InstanceProperties; +import com.google.cloud.compute.v1.InstanceTemplate; +import com.google.cloud.compute.v1.InstanceTemplatesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateTemplateWithoutConsumingReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the template you want to query. 
+ String templateName = "YOUR_INSTANCE_TEMPLATE_NAME"; + String machineType = "e2-standard-4"; + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + + createTemplateWithoutConsumingReservationAsync( + projectId, templateName, machineType, sourceImage); + } + + + // Create a template that explicitly doesn't consume any reservations. + public static InstanceTemplate createTemplateWithoutConsumingReservationAsync( + String projectId, String templateName, String machineType, String sourceImage) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (InstanceTemplatesClient instanceTemplatesClient = InstanceTemplatesClient.create()) { + AttachedDisk attachedDisk = AttachedDisk.newBuilder() + .setInitializeParams(AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskType("pd-balanced") + .setDiskSizeGb(250) + .build()) + .setAutoDelete(true) + .setBoot(true) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName("global/networks/default") + .addAccessConfigs(AccessConfig.newBuilder() + .setName("External NAT") + .setType(AccessConfig.Type.ONE_TO_ONE_NAT.toString()) + .setNetworkTier(AccessConfig.NetworkTier.PREMIUM.toString()) + .build()) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(NO_RESERVATION.toString()) + .build(); + + InstanceProperties instanceProperties = InstanceProperties.newBuilder() + .addDisks(attachedDisk) + .setMachineType(machineType) + .setReservationAffinity(reservationAffinity) + .addNetworkInterfaces(networkInterface) + .build(); + + InsertInstanceTemplateRequest insertInstanceTemplateRequest = InsertInstanceTemplateRequest + .newBuilder() + .setProject(projectId) + 
.setInstanceTemplateResource(InstanceTemplate.newBuilder() + .setName(templateName) + .setProperties(instanceProperties) + .build()) + .build(); + + Operation response = instanceTemplatesClient.insertAsync(insertInstanceTemplateRequest) + .get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instanceTemplatesClient.get(projectId, templateName); + } + } +} +// [END compute_template_not_consume_reservation] diff --git a/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java new file mode 100644 index 00000000000..066eb5711ad --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java @@ -0,0 +1,101 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.reservation; + +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.NO_RESERVATION; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstanceTemplate; +import compute.DeleteInstance; +import compute.DeleteInstanceTemplate; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class ConsumeReservationIT { + + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-central1-a"; + private static final String INSTANCE_NOT_CONSUME_RESERVATION_NAME = + "test-instance-not-consume-" + UUID.randomUUID().toString().substring(0, 8); + private static final String TEMPLATE_NOT_CONSUME_RESERVATION_NAME = + "test-template-not-consume-" + UUID.randomUUID().toString().substring(0, 8); + private static final String MACHINE_TYPE_NAME = "n1-standard-1"; + private static final String SOURCE_IMAGE = "projects/debian-cloud/global/images/family/debian-11"; + private static final String NETWORK_NAME = "default"; + private static final long DISK_SIZE_GD = 10L; + + // Check if the required environment variables are set. 
+ public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @AfterAll + public static void cleanup() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Delete the instance created for testing. + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_NOT_CONSUME_RESERVATION_NAME); + DeleteInstanceTemplate.deleteInstanceTemplate( + PROJECT_ID, TEMPLATE_NOT_CONSUME_RESERVATION_NAME); + } + + @Test + public void testCreateInstanceNotConsumeReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = CreateInstanceWithoutConsumingReservation + .createInstanceWithoutConsumingReservationAsync( + PROJECT_ID, ZONE, INSTANCE_NOT_CONSUME_RESERVATION_NAME, MACHINE_TYPE_NAME, + SOURCE_IMAGE, DISK_SIZE_GD, NETWORK_NAME); + + Assertions.assertNotNull(instance); + Assertions.assertEquals(NO_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testCreateTemplateNotConsumeReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + InstanceTemplate template = + CreateTemplateWithoutConsumingReservation.createTemplateWithoutConsumingReservationAsync( + PROJECT_ID, TEMPLATE_NOT_CONSUME_RESERVATION_NAME, + MACHINE_TYPE_NAME, SOURCE_IMAGE); + + Assertions.assertNotNull(template); + Assertions.assertEquals(NO_RESERVATION.toString(), + template.getPropertiesOrBuilder().getReservationAffinity().getConsumeReservationType()); + } +} From 55d96d4d2bcd34ac52fe9d76a5a525ccc8a187f6 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 31 Oct 
2024 00:13:48 +0100 Subject: [PATCH 05/66] fix(deps): update dependency org.eclipse.jetty:jetty-server to v9.4.56.v20240826 [security] (#9579) --- appengine-java11/appengine-simple-jetty-main/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appengine-java11/appengine-simple-jetty-main/pom.xml b/appengine-java11/appengine-simple-jetty-main/pom.xml index fbc38d30133..6ad52162b64 100644 --- a/appengine-java11/appengine-simple-jetty-main/pom.xml +++ b/appengine-java11/appengine-simple-jetty-main/pom.xml @@ -21,7 +21,7 @@ UTF-8 11 11 - 9.4.54.v20240208 + 9.4.56.v20240826 From 286dd4463a0574623dcab4a939904e0231719a56 Mon Sep 17 00:00:00 2001 From: Jacek Spalinski <69755075+jacspa96@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:51:52 +0100 Subject: [PATCH 06/66] feat(dataplex): adjust code snippets (#9608) * feat(dataplex): adjust snippets * feat(dataplex): fix small typos in tests * feat(dataplex): small adjustment in tests --------- Co-authored-by: Jacek Spalinski --- .../main/java/dataplex/CreateAspectType.java | 38 +++++++------- .../main/java/dataplex/CreateEntryGroup.java | 13 ++--- .../main/java/dataplex/CreateEntryType.java | 34 ++++++------- .../main/java/dataplex/DeleteAspectType.java | 9 ++-- .../main/java/dataplex/DeleteEntryGroup.java | 9 ++-- .../main/java/dataplex/DeleteEntryType.java | 9 ++-- .../src/main/java/dataplex/GetAspectType.java | 9 ++-- .../src/main/java/dataplex/GetEntryGroup.java | 9 ++-- .../src/main/java/dataplex/GetEntryType.java | 9 ++-- .../main/java/dataplex/ListAspectTypes.java | 9 ++-- .../main/java/dataplex/ListEntryGroups.java | 9 ++-- .../main/java/dataplex/ListEntryTypes.java | 9 ++-- .../main/java/dataplex/UpdateAspectType.java | 49 +++++++++---------- .../main/java/dataplex/UpdateEntryGroup.java | 27 +++++----- .../main/java/dataplex/UpdateEntryType.java | 27 +++++----- .../src/test/java/dataplex/AspectTypeIT.java | 8 +-- .../src/test/java/dataplex/EntryGroupIT.java | 8 +-- 
.../src/test/java/dataplex/EntryTypeIT.java | 12 +++-- 18 files changed, 128 insertions(+), 169 deletions(-) diff --git a/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java b/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java index b8fa5ad18da..5ef598a69b5 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateAspectType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.LocationName; import java.util.List; -// Sample to create Aspect Type public class CreateAspectType { public static void main(String[] args) throws Exception { @@ -59,33 +58,32 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created aspect type: " + createdAspectType.getName()); } + // Method to create Aspect Type located in projectId, location and with aspectTypeId and + // aspectFields specifying schema of the Aspect Type public static AspectType createAspectType( String projectId, String location, String aspectTypeId, List aspectFields) throws Exception { - LocationName locationName = LocationName.of(projectId, location); - AspectType aspectType = - AspectType.newBuilder() - .setDescription("description of the aspect type") - .setMetadataTemplate( - AspectType.MetadataTemplate.newBuilder() - // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ - // That means name must only contain alphanumeric character or dashes or - // underscores, start with an alphabet, and must be less than 63 characters. - .setName("name_of_the_template") - .setType("record") - // Aspect Type fields, that themselves are Metadata Templates - .addAllRecordFields(aspectFields) - .build()) - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. 
After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + AspectType aspectType = + AspectType.newBuilder() + .setDescription("description of the aspect type") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ + // That means name must only contain alphanumeric character or dashes or + // underscores, start with an alphabet, and must be less than 63 characters. + .setName("name_of_the_template") + .setType("record") + // Aspect Type fields, that themselves are Metadata Templates + .addAllRecordFields(aspectFields) + .build()) + .build(); return client.createAspectTypeAsync(locationName, aspectType, aspectTypeId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java index 20385e52d0d..3df7feeb515 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntryGroup.java @@ -21,7 +21,6 @@ import com.google.cloud.dataplex.v1.EntryGroup; import com.google.cloud.dataplex.v1.LocationName; -// Samples to create Entry Group public class CreateEntryGroup { public static void main(String[] args) throws Exception { @@ -35,17 +34,15 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created entry group: " + createdEntryGroup.getName()); } + // Method to create Entry Group located in projectId, location and with entryGroupId public static EntryGroup createEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - LocationName 
locationName = LocationName.of(projectId, location); - EntryGroup entryGroup = - EntryGroup.newBuilder().setDescription("description of the entry group").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + EntryGroup entryGroup = + EntryGroup.newBuilder().setDescription("description of the entry group").build(); return client.createEntryGroupAsync(locationName, entryGroup, entryGroupId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java b/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java index 4b35f120c3b..190e35d8f32 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntryType.java @@ -21,7 +21,6 @@ import com.google.cloud.dataplex.v1.EntryType; import com.google.cloud.dataplex.v1.LocationName; -// Samples to create Entry Type public class CreateEntryType { public static void main(String[] args) throws Exception { @@ -35,27 +34,26 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully created entry type: " + createdEntryType.getName()); } + // Method to create Entry Type located in projectId, location and with entryTypeId public static EntryType createEntryType(String projectId, String location, String entryTypeId) throws Exception { - LocationName locationName = LocationName.of(projectId, location); - EntryType entryType = - EntryType.newBuilder() - .setDescription("description of the entry type") - // Required 
aspects will need to be attached to every entry created for this entry type. - // You cannot change required aspects for entry type once it is created. - .addRequiredAspects( - EntryType.AspectInfo.newBuilder() - // Example of system aspect type. - // It is also possible to specify custom aspect type. - .setType("projects/dataplex-types/locations/global/aspectTypes/schema") - .build()) - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); + EntryType entryType = + EntryType.newBuilder() + .setDescription("description of the entry type") + // Required aspects will need to be attached to every entry created for this entry + // type. + // You cannot change required aspects for entry type once it is created. + .addRequiredAspects( + EntryType.AspectInfo.newBuilder() + // Example of system aspect type. + // It is also possible to specify custom aspect type. 
+ .setType("projects/dataplex-types/locations/global/aspectTypes/schema") + .build()) + .build(); return client.createEntryTypeAsync(locationName, entryType, entryTypeId).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java b/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java index a329d4e6b3d..37be0713bb1 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteAspectType.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.AspectTypeName; import com.google.cloud.dataplex.v1.CatalogServiceClient; -// Sample to delete Aspect Type public class DeleteAspectType { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted aspect type"); } + // Method to delete Aspect Type located in projectId, location and with aspectTypeId public static void deleteAspectType(String projectId, String location, String aspectTypeId) throws Exception { - AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); client.deleteAspectTypeAsync(aspectTypeName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java index da4445edecd..6a7935b7b18 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntryGroup.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import com.google.cloud.dataplex.v1.EntryGroupName; -// Sample to delete Entry Group public class DeleteEntryGroup { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted entry group"); } + // Method to delete Entry Group located in projectId, location and with entryGroupId public static void deleteEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); client.deleteEntryGroupAsync(entryGroupName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java index 8662af2d31e..2c2fc66b91d 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntryType.java @@ -20,7 +20,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import com.google.cloud.dataplex.v1.EntryTypeName; -// Sample to delete Entry Type public class DeleteEntryType { public static void main(String[] args) throws Exception { @@ -34,15 +33,13 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully deleted entry type"); } + // Method to delete Entry Type located in projectId, location and with entryTypeId public static void deleteEntryType(String projectId, String location, String entryTypeId) throws Exception { - EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); client.deleteEntryTypeAsync(entryTypeName).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetAspectType.java b/dataplex/snippets/src/main/java/dataplex/GetAspectType.java index 33c14191f88..92e21ea1e4d 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/GetAspectType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.CatalogServiceClient; import java.io.IOException; -// Sample to get Aspect Type public class GetAspectType { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Aspect type retrieved successfully: " + aspectType.getName()); } + // Method to retrieve Aspect Type located in projectId, location and with aspectTypeId public static AspectType getAspectType(String projectId, String location, String aspectTypeId) throws IOException { - AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectTypeName aspectTypeName = AspectTypeName.of(projectId, location, aspectTypeId); return client.getAspectType(aspectTypeName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java index d3073d4615a..eef9d7a9e76 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntryGroup.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryGroupName; import java.io.IOException; -// Sample to get Entry Group public class GetEntryGroup { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Entry group retrieved successfully: " + entryGroup.getName()); } + // Method to retrieve Entry Group located in projectId, location and with entryGroupId public static EntryGroup getEntryGroup(String projectId, String location, String entryGroupId) throws IOException { - EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroupName entryGroupName = EntryGroupName.of(projectId, location, entryGroupId); return client.getEntryGroup(entryGroupName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntryType.java b/dataplex/snippets/src/main/java/dataplex/GetEntryType.java index 36ed5038709..87cf18ef423 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntryType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryTypeName; import java.io.IOException; -// Sample to get Entry Type public class GetEntryType { public static void main(String[] args) throws IOException { @@ -36,15 +35,13 @@ public static void main(String[] args) throws IOException { System.out.println("Entry type retrieved successfully: " + entryType.getName()); } + // Method to retrieve Entry Type located in projectId, location and with entryTypeId public static EntryType getEntryType(String projectId, String location, String entryTypeId) throws IOException { - EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryTypeName entryTypeName = EntryTypeName.of(projectId, location, entryTypeId); return client.getEntryType(entryTypeName); } } diff --git a/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java b/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java index 2f6b0181aa4..73b9dbab517 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java +++ b/dataplex/snippets/src/main/java/dataplex/ListAspectTypes.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Aspect Types public class ListAspectTypes { public static void main(String[] args) throws IOException { @@ -38,15 +37,13 @@ public static void main(String[] args) throws IOException { aspectType -> System.out.println("Aspect type name: " + aspectType.getName())); } + // Method to list Aspect Types located in projectId and location public static List listAspectTypes(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListAspectTypesPagedResponse listAspectTypesResponse = client.listAspectTypes(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java b/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java index e66c9043052..b30422f3805 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntryGroups.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Entry Groups public class ListEntryGroups { public static void main(String[] args) throws IOException { @@ -38,15 +37,13 @@ public static void main(String[] args) throws IOException { entryGroup -> System.out.println("Entry group name: " + entryGroup.getName())); } + // Method to list Entry Groups located in projectId and location public static List listEntryGroups(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListEntryGroupsPagedResponse listEntryGroupsResponse = client.listEntryGroups(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java b/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java index 30bb5de05c2..35eeefb3ac3 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntryTypes.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -// Sample to list Entry Types public class ListEntryTypes { public static void main(String[] args) throws IOException { @@ -37,15 +36,13 @@ public static void main(String[] args) throws IOException { entryTypes.forEach(entryType -> System.out.println("Entry type name: " + entryType.getName())); } + // Method to list Entry Types located in projectId and location public static List listEntryTypes(String projectId, String location) throws IOException { - LocationName locationName = LocationName.of(projectId, location); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. 
try (CatalogServiceClient client = CatalogServiceClient.create()) { + LocationName locationName = LocationName.of(projectId, location); CatalogServiceClient.ListEntryTypesPagedResponse listEntryTypesResponse = client.listEntryTypes(locationName); // Paging is implicitly handled by .iterateAll(), all results will be returned diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java b/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java index cb0c9b1b4d7..49572df174e 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateAspectType.java @@ -23,7 +23,6 @@ import com.google.protobuf.FieldMask; import java.util.List; -// Sample to update Aspect Type public class UpdateAspectType { public static void main(String[] args) throws Exception { @@ -60,39 +59,37 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated aspect type: " + updatedAspectType.getName()); } + // Method to update Aspect Type located in projectId, location and with aspectTypeId and + // aspectFields specifying schema of the Aspect Type public static AspectType updateAspectType( String projectId, String location, String aspectTypeId, List aspectFields) throws Exception { - AspectType aspectType = - AspectType.newBuilder() - .setName(AspectTypeName.of(projectId, location, aspectTypeId).toString()) - .setDescription("updated description of the aspect type") - .setMetadataTemplate( - AspectType.MetadataTemplate.newBuilder() - // Because Record Fields is an array, it needs to be fully replaced. - // It is because you do not have a way to specify array elements in update mask. - .addAllRecordFields(aspectFields) - .build()) - .build(); - - // Update mask specifies which fields will be updated. - // If empty mask is given, all modifiable fields from the request will be used for update. 
- // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = - FieldMask.newBuilder() - .addPaths("description") - .addPaths("metadata_template.record_fields") - .build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + AspectType aspectType = + AspectType.newBuilder() + .setName(AspectTypeName.of(projectId, location, aspectTypeId).toString()) + .setDescription("updated description of the aspect type") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + // Because Record Fields is an array, it needs to be fully replaced. + // It is because you do not have a way to specify array elements in update + // mask. + .addAllRecordFields(aspectFields) + .build()) + .build(); + + // Update mask specifies which fields will be updated. 
+ // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = + FieldMask.newBuilder() + .addPaths("description") + .addPaths("metadata_template.record_fields") + .build(); return client.updateAspectTypeAsync(aspectType, updateMask).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java index bc0a0c727e6..4bae947e317 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntryGroup.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryGroupName; import com.google.protobuf.FieldMask; -// Sample to update Entry Group public class UpdateEntryGroup { public static void main(String[] args) throws Exception { @@ -36,25 +35,21 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated entry group: " + updatedEntryGroup.getName()); } + // Method to update Entry Group located in projectId, location and with entryGroupId public static EntryGroup updateEntryGroup(String projectId, String location, String entryGroupId) throws Exception { - EntryGroup entryGroup = - EntryGroup.newBuilder() - .setName(EntryGroupName.of(projectId, location, entryGroupId).toString()) - .setDescription("updated description of the entry group") - .build(); - - // Update mask specifies which fields will be updated. - // If empty mask is given, all modifiable fields from the request will be used for update. - // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. 
After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryGroup entryGroup = + EntryGroup.newBuilder() + .setName(EntryGroupName.of(projectId, location, entryGroupId).toString()) + .setDescription("updated description of the entry group") + .build(); + + // Update mask specifies which fields will be updated. + // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); return client.updateEntryGroupAsync(entryGroup, updateMask).get(); } } diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java index daac40a35f3..d0c3a245077 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntryType.java @@ -22,7 +22,6 @@ import com.google.cloud.dataplex.v1.EntryTypeName; import com.google.protobuf.FieldMask; -// Sample to update Entry Type public class UpdateEntryType { public static void main(String[] args) throws Exception { @@ -36,25 +35,21 @@ public static void main(String[] args) throws Exception { System.out.println("Successfully updated entry type: " + updatedEntryType.getName()); } + // Method to update Entry Type located in projectId, location and with entryTypeId public static EntryType updateEntryType(String projectId, String location, String entryTypeId) throws Exception { - EntryType entryType = - EntryType.newBuilder() - .setName(EntryTypeName.of(projectId, location, entryTypeId).toString()) - .setDescription("updated description of the entry type") - .build(); - - // Update mask specifies which fields will be updated. 
- // If empty mask is given, all modifiable fields from the request will be used for update. - // If update mask is specified as "*" it is treated as full update, - // that means fields not present in the request will be emptied. - FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources, - // or use "try-with-close" statement to do this automatically. + // once, and can be reused for multiple requests. try (CatalogServiceClient client = CatalogServiceClient.create()) { + EntryType entryType = + EntryType.newBuilder() + .setName(EntryTypeName.of(projectId, location, entryTypeId).toString()) + .setDescription("updated description of the entry type") + .build(); + + // Update mask specifies which fields will be updated. 
+ // For more information on update masks, see: https://google.aip.dev/161 + FieldMask updateMask = FieldMask.newBuilder().addPaths("description").build(); return client.updateEntryTypeAsync(entryType, updateMask).get(); } } diff --git a/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java b/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java index 35f5d4df991..066d43a6b42 100644 --- a/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java +++ b/dataplex/snippets/src/test/java/dataplex/AspectTypeIT.java @@ -31,7 +31,7 @@ public class AspectTypeIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); private static final String LOCATION = "us-central1"; - private static final String aspectTypeId = "test-aspect-type" + ID; + private static final String aspectTypeId = "test-aspect-type-" + ID; private static String expectedAspectType; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -79,7 +79,8 @@ public void testUpdateAspectType() throws Exception { @Test public void testCreateAspectType() throws Exception { - String aspectTypeIdToCreate = "test-aspect-type" + UUID.randomUUID().toString().substring(0, 8); + String aspectTypeIdToCreate = + "test-aspect-type-" + UUID.randomUUID().toString().substring(0, 8); String expectedAspectTypeToCreate = String.format( "projects/%s/locations/%s/aspectTypes/%s", PROJECT_ID, LOCATION, aspectTypeIdToCreate); @@ -95,7 +96,8 @@ public void testCreateAspectType() throws Exception { @Test public void testDeleteAspectType() throws Exception { - String aspectTypeIdToDelete = "test-aspect-type" + UUID.randomUUID().toString().substring(0, 8); + String aspectTypeIdToDelete = + "test-aspect-type-" + UUID.randomUUID().toString().substring(0, 8); // Create Aspect Type to be deleted CreateAspectType.createAspectType( PROJECT_ID, LOCATION, aspectTypeIdToDelete, new ArrayList<>()); diff --git a/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java 
b/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java index 76456e8edef..8e2608b8c06 100644 --- a/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java +++ b/dataplex/snippets/src/test/java/dataplex/EntryGroupIT.java @@ -30,7 +30,7 @@ public class EntryGroupIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); private static final String LOCATION = "us-central1"; - private static final String entryGroupId = "test-entry-group" + ID; + private static final String entryGroupId = "test-entry-group-" + ID; private static String expectedEntryGroup; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -77,7 +77,8 @@ public void testUpdateEntryGroup() throws Exception { @Test public void testCreateEntryGroup() throws Exception { - String entryGroupIdToCreate = "test-entry-group" + UUID.randomUUID().toString().substring(0, 8); + String entryGroupIdToCreate = + "test-entry-group-" + UUID.randomUUID().toString().substring(0, 8); String expectedEntryGroupToCreate = String.format( "projects/%s/locations/%s/entryGroups/%s", PROJECT_ID, LOCATION, entryGroupIdToCreate); @@ -92,7 +93,8 @@ public void testCreateEntryGroup() throws Exception { @Test public void testDeleteEntryGroup() throws Exception { - String entryGroupIdToDelete = "test-entry-group" + UUID.randomUUID().toString().substring(0, 8); + String entryGroupIdToDelete = + "test-entry-group-" + UUID.randomUUID().toString().substring(0, 8); // Create Entry Group to be deleted CreateEntryGroup.createEntryGroup(PROJECT_ID, LOCATION, entryGroupIdToDelete); diff --git a/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java b/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java index e6116d637ff..a410e785c45 100644 --- a/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java +++ b/dataplex/snippets/src/test/java/dataplex/EntryTypeIT.java @@ -30,7 +30,7 @@ public class EntryTypeIT { private static final String ID = UUID.randomUUID().toString().substring(0, 8); 
private static final String LOCATION = "us-central1"; - private static final String entryTypeId = "test-entry-type" + ID; + private static final String entryTypeId = "test-entry-type-" + ID; private static String expectedEntryType; private static final String PROJECT_ID = requireProjectIdEnvVar(); @@ -48,6 +48,7 @@ public static void checkRequirements() { } @BeforeClass + // Set-up code that will be executed before all tests public static void setUp() throws Exception { expectedEntryType = String.format("projects/%s/locations/%s/entryTypes/%s", PROJECT_ID, LOCATION, entryTypeId); @@ -75,7 +76,7 @@ public void testUpdateEntryType() throws Exception { @Test public void testCreateEntryType() throws Exception { - String entryTypeIdToCreate = "test-entry-type" + UUID.randomUUID().toString().substring(0, 8); + String entryTypeIdToCreate = "test-entry-type-" + UUID.randomUUID().toString().substring(0, 8); String expectedEntryTypeToCreate = String.format( "projects/%s/locations/%s/entryTypes/%s", PROJECT_ID, LOCATION, entryTypeIdToCreate); @@ -90,8 +91,8 @@ public void testCreateEntryType() throws Exception { @Test public void testDeleteEntryType() throws Exception { - String entryTypeIdToDelete = "test-entry-type" + UUID.randomUUID().toString().substring(0, 8); - // Create Entry Group to be deleted + String entryTypeIdToDelete = "test-entry-type-" + UUID.randomUUID().toString().substring(0, 8); + // Create Entry Type to be deleted CreateEntryType.createEntryType(PROJECT_ID, LOCATION, entryTypeIdToDelete); // No exception means successful call. 
@@ -99,8 +100,9 @@ public void testDeleteEntryType() throws Exception { } @AfterClass + // Clean-up code that will be executed after all tests public static void tearDown() throws Exception { - // Clean-up Entry Group resource created in setUp() + // Clean-up Entry Type resource created in setUp() DeleteEntryType.deleteEntryType(PROJECT_ID, LOCATION, entryTypeId); } } From 22e1c11019110bab455c7cd6d9f698cdcd476106 Mon Sep 17 00:00:00 2001 From: Jacek Spalinski <69755075+jacspa96@users.noreply.github.com> Date: Thu, 7 Nov 2024 10:02:30 +0100 Subject: [PATCH 07/66] feat(dataplex): link documentation for available locations (#9647) Co-authored-by: Jacek Spalinski --- dataplex/snippets/src/main/java/dataplex/CreateEntry.java | 1 + dataplex/snippets/src/main/java/dataplex/DeleteEntry.java | 1 + dataplex/snippets/src/main/java/dataplex/GetEntry.java | 1 + dataplex/snippets/src/main/java/dataplex/ListEntries.java | 1 + dataplex/snippets/src/main/java/dataplex/LookupEntry.java | 1 + dataplex/snippets/src/main/java/dataplex/UpdateEntry.java | 1 + 6 files changed, 6 insertions(+) diff --git a/dataplex/snippets/src/main/java/dataplex/CreateEntry.java b/dataplex/snippets/src/main/java/dataplex/CreateEntry.java index 63607563497..b4d1a7a7fbe 100644 --- a/dataplex/snippets/src/main/java/dataplex/CreateEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/CreateEntry.java @@ -31,6 +31,7 @@ public class CreateEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. 
String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java b/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java index 1f9acc8a833..7e8467324e5 100644 --- a/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/DeleteEntry.java @@ -25,6 +25,7 @@ public class DeleteEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/GetEntry.java b/dataplex/snippets/src/main/java/dataplex/GetEntry.java index 73773bf93bc..e1580f17a19 100644 --- a/dataplex/snippets/src/main/java/dataplex/GetEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/GetEntry.java @@ -29,6 +29,7 @@ public class GetEntry { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. 
String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/ListEntries.java b/dataplex/snippets/src/main/java/dataplex/ListEntries.java index be54b674563..ec564c12fc3 100644 --- a/dataplex/snippets/src/main/java/dataplex/ListEntries.java +++ b/dataplex/snippets/src/main/java/dataplex/ListEntries.java @@ -30,6 +30,7 @@ public class ListEntries { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/LookupEntry.java b/dataplex/snippets/src/main/java/dataplex/LookupEntry.java index e4f19c79920..f32774d12d4 100644 --- a/dataplex/snippets/src/main/java/dataplex/LookupEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/LookupEntry.java @@ -29,6 +29,7 @@ public class LookupEntry { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. 
String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; diff --git a/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java b/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java index 0127c3e4a20..d3cee2cc74f 100644 --- a/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java +++ b/dataplex/snippets/src/main/java/dataplex/UpdateEntry.java @@ -32,6 +32,7 @@ public class UpdateEntry { public static void main(String[] args) throws Exception { // TODO(developer): Replace these variables before running the sample. String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations String location = "MY_LOCATION"; String entryGroupId = "MY_ENTRY_GROUP_ID"; String entryId = "MY_ENTRY_ID"; From 5c97f2709b97ff8605bad820ddeb4e0b4b53af81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Thu, 7 Nov 2024 11:41:07 +0100 Subject: [PATCH 08/66] feat(compute): extend compute hyperdisk pool create (#9595) * Added performance-provisioning-type, fixed test * fixed test * Enabled tests * Fixed whitespaces * Added comments * Fixed comments and zone * Fixed timeout and zone in test class * Fixed naming, added variable CAPACITY_PROVISIONING_TYPE * Fixed storagePoolType * Fixed lint issue --- .../CreateHyperdiskStoragePool.java | 22 ++++---- .../test/java/compute/disks/HyperdisksIT.java | 51 ++++++++++--------- 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java b/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java index 61239faa495..30cdde803d0 100644 --- 
a/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java +++ b/compute/cloud-client/src/main/java/compute/disks/storagepool/CreateHyperdiskStoragePool.java @@ -15,7 +15,6 @@ package compute.disks.storagepool; // [START compute_hyperdisk_pool_create] - import com.google.cloud.compute.v1.InsertStoragePoolRequest; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.StoragePool; @@ -32,12 +31,13 @@ public static void main(String[] args) // Project ID or project number of the Google Cloud project you want to use. String projectId = "YOUR_PROJECT_ID"; // Name of the zone in which you want to create the storagePool. - String zone = "europe-central2-b"; + String zone = "us-central1-a"; // Name of the storagePool you want to create. String storagePoolName = "YOUR_STORAGE_POOL_NAME"; - // The type of disk you want to create. This value uses the following format: - // "projects/%s/zones/%s/storagePoolTypes/hyperdisk-throughput|hyperdisk-balanced" - String storagePoolType = "hyperdisk-balanced"; + // The type of disk you want to create. + // Storage types can be "hyperdisk-throughput" or "hyperdisk-balanced" + String storagePoolType = String.format( + "projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", projectId, zone); // Optional: the capacity provisioning type of the storage pool. // The allowed values are advanced and standard. If not specified, the value advanced is used. String capacityProvisioningType = "advanced"; @@ -48,16 +48,19 @@ public static void main(String[] args) long provisionedIops = 3000; // the throughput in MBps to provision for the storage pool. long provisionedThroughput = 140; + // The allowed values are low-casing strings "advanced" and "standard". + // If not specified, "advanced" is used. 
+ String performanceProvisioningType = "advanced"; createHyperdiskStoragePool(projectId, zone, storagePoolName, storagePoolType, - capacityProvisioningType, provisionedCapacity, provisionedIops, provisionedThroughput); + capacityProvisioningType, provisionedCapacity, provisionedIops, + provisionedThroughput, performanceProvisioningType); } // Creates a hyperdisk storagePool in a project public static StoragePool createHyperdiskStoragePool(String projectId, String zone, - String storagePoolName, String storagePoolType, - String capacityProvisioningType, long capacity, - long iops, long throughput) + String storagePoolName, String storagePoolType, String capacityProvisioningType, + long capacity, long iops, long throughput, String performanceProvisioningType) throws IOException, ExecutionException, InterruptedException, TimeoutException { // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. @@ -71,6 +74,7 @@ public static StoragePool createHyperdiskStoragePool(String projectId, String zo .setPoolProvisionedCapacityGb(capacity) .setPoolProvisionedIops(iops) .setPoolProvisionedThroughput(throughput) + .setPerformanceProvisioningType(performanceProvisioningType) .build(); InsertStoragePoolRequest request = InsertStoragePoolRequest.newBuilder() diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java index b0d4b0fee91..1ec9e09aff6 100644 --- a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java +++ b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java @@ -29,25 +29,27 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.junit.Assert; -import org.junit.FixMethodOrder; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; +import 
org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.junit.runners.MethodSorters; @RunWith(JUnit4.class) -@Timeout(value = 40, unit = TimeUnit.MINUTES) -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@Timeout(value = 6, unit = TimeUnit.MINUTES) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class HyperdisksIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-east1-c"; - private static String HYPERDISK_NAME; - private static String HYPERDISK_IN_POOL_NAME; - private static String STORAGE_POOL_NAME; + private static final String ZONE = "us-central1-a"; + private static final String HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); + private static final String HYPERDISK_IN_POOL_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); + private static final String STORAGE_POOL_NAME = "test-storage-pool-enc-" + UUID.randomUUID(); + private static final String PERFORMANCE_PROVISIONING_TYPE = "advanced"; + private static final String CAPACITY_PROVISIONING_TYPE = "advanced"; // Check if the required environment variables are set. 
public static void requireEnvVar(String envVarName) { @@ -60,12 +62,6 @@ public static void setUp() throws IOException, ExecutionException, InterruptedException, TimeoutException { requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); requireEnvVar("GOOGLE_CLOUD_PROJECT"); - HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - HYPERDISK_IN_POOL_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - STORAGE_POOL_NAME = "test-storage-pool-enc-" + UUID.randomUUID(); - - Util.cleanUpExistingDisks("test-hyperdisk-enc-", PROJECT_ID, ZONE); - Util.cleanUpExistingStoragePool("test-storage-pool-enc-", PROJECT_ID, ZONE); } @AfterAll @@ -73,13 +69,14 @@ public static void cleanup() throws IOException, InterruptedException, ExecutionException, TimeoutException { // Delete all disks created for testing. DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_NAME); - //DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME); + DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME); - //Util.deleteStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME); + Util.deleteStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME); } @Test - public void stage1_CreateHyperdiskTest() + @Order(1) + public void testCreateHyperdisk() throws IOException, ExecutionException, InterruptedException, TimeoutException { String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); @@ -96,29 +93,33 @@ public void stage1_CreateHyperdiskTest() Assert.assertTrue(hyperdisk.getZone().contains(ZONE)); } - @Disabled @Test - public void stage1_CreateHyperdiskStoragePoolTest() + @Order(1) + public void testCreateHyperdiskStoragePool() throws IOException, ExecutionException, InterruptedException, TimeoutException { String poolType = String.format("projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", PROJECT_ID, ZONE); StoragePool storagePool = CreateHyperdiskStoragePool .createHyperdiskStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME, poolType, - "advanced", 10240, 10000, 10240); + 
CAPACITY_PROVISIONING_TYPE, 10240, 10000, 1024, + PERFORMANCE_PROVISIONING_TYPE); Assert.assertNotNull(storagePool); Assert.assertEquals(STORAGE_POOL_NAME, storagePool.getName()); Assert.assertEquals(10000, storagePool.getPoolProvisionedIops()); - Assert.assertEquals(10240, storagePool.getPoolProvisionedThroughput()); + Assert.assertEquals(1024, storagePool.getPoolProvisionedThroughput()); Assert.assertEquals(10240, storagePool.getPoolProvisionedCapacityGb()); Assert.assertTrue(storagePool.getStoragePoolType().contains("hyperdisk-balanced")); - Assert.assertTrue(storagePool.getCapacityProvisioningType().equalsIgnoreCase("advanced")); + Assert.assertTrue(storagePool.getCapacityProvisioningType() + .equalsIgnoreCase(CAPACITY_PROVISIONING_TYPE)); + Assert.assertTrue(storagePool.getPerformanceProvisioningType() + .equalsIgnoreCase(PERFORMANCE_PROVISIONING_TYPE)); Assert.assertTrue(storagePool.getZone().contains(ZONE)); } - @Disabled @Test - public void stage2_CreateHyperdiskStoragePoolTest() + @Order(2) + public void testCreateDiskInStoragePool() throws IOException, ExecutionException, InterruptedException, TimeoutException { String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); String storagePoolLink = String From 282ec817e77a62767c58a7c423ed3d3ab98df655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:32:43 +0100 Subject: [PATCH 09/66] feat(tpu): add create/delete/get samples (#9585) * Changed package, added information to CODEOWNERS * Added information to CODEOWNERS * Added timeout * Fixed parameters for test * Fixed DeleteTpuVm and naming * Added comment, created Util class * Fixed naming * Fixed whitespace * Split PR into smaller, deleted redundant code * Fixed tests, deleted cleanup method * Fixed tests --- .../tpu/CreateQueuedResourceWithNetwork.java | 5 +- 
tpu/src/main/java/tpu/CreateTpuVm.java | 97 +++++++++++++++++++ .../java/tpu/DeleteForceQueuedResource.java | 1 - tpu/src/main/java/tpu/DeleteTpuVm.java | 80 +++++++++++++++ tpu/src/main/java/tpu/GetTpuVm.java | 56 +++++++++++ ...thNetworkIT.java => QueuedResourceIT.java} | 31 +++--- tpu/src/test/java/tpu/TpuVmIT.java | 93 ++++++++++++++++++ tpu/src/test/java/tpu/Util.java | 87 ----------------- 8 files changed, 340 insertions(+), 110 deletions(-) create mode 100644 tpu/src/main/java/tpu/CreateTpuVm.java create mode 100644 tpu/src/main/java/tpu/DeleteTpuVm.java create mode 100644 tpu/src/main/java/tpu/GetTpuVm.java rename tpu/src/test/java/tpu/{CreateQueuedResourceWithNetworkIT.java => QueuedResourceIT.java} (74%) create mode 100644 tpu/src/test/java/tpu/TpuVmIT.java delete mode 100644 tpu/src/test/java/tpu/Util.java diff --git a/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java index de9aa884aac..9598603ad34 100644 --- a/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java +++ b/tpu/src/main/java/tpu/CreateQueuedResourceWithNetwork.java @@ -128,11 +128,10 @@ public static QueuedResource createQueuedResourceWithNetwork( .setQueuedResourceId(queuedResourceId) .build(); - QueuedResource response = tpuClient.createQueuedResourceAsync(request).get(); // You can wait until TPU Node is READY, // and check its status using getTpuVm() from "tpu_vm_get" sample. 
- System.out.println("Queued Resource created: " + queuedResourceId); - return response; + + return tpuClient.createQueuedResourceAsync(request).get(); } } } diff --git a/tpu/src/main/java/tpu/CreateTpuVm.java b/tpu/src/main/java/tpu/CreateTpuVm.java new file mode 100644 index 00000000000..667b7aa5012 --- /dev/null +++ b/tpu/src/main/java/tpu/CreateTpuVm.java @@ -0,0 +1,97 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_vm_create] +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.tpu.v2.CreateNodeRequest; +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.TpuClient; +import com.google.cloud.tpu.v2.TpuSettings; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.threeten.bp.Duration; + +public class CreateTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. 
+ // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "europe-west4-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + // The accelerator type that specifies the version and size of the Cloud TPU you want to create. + // For more information about supported accelerator types for each TPU version, + // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions. + String tpuType = "v2-8"; + // Software version that specifies the version of the TPU runtime to install. + // For more information see https://cloud.google.com/tpu/docs/runtimes + String tpuSoftwareVersion = "tpu-vm-tf-2.14.1"; + + createTpuVm(projectId, zone, nodeName, tpuType, tpuSoftwareVersion); + } + + // Creates a TPU VM with the specified name, zone, accelerator type, and version. + public static Node createTpuVm( + String projectId, String zone, String nodeName, String tpuType, String tpuSoftwareVersion) + throws IOException, ExecutionException, InterruptedException { + // With these settings the client library handles the Operation's polling mechanism + // and prevent CancellationException error + TpuSettings.Builder clientSettings = + TpuSettings.newBuilder(); + clientSettings + .createNodeOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofHours(24L)) + .build())); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + + Node tpuVm = Node.newBuilder() + .setName(nodeName) + .setAcceleratorType(tpuType) + .setRuntimeVersion(tpuSoftwareVersion) + .build(); + + CreateNodeRequest request = CreateNodeRequest.newBuilder() + .setParent(parent) + .setNodeId(nodeName) + .setNode(tpuVm) + .build(); + + return tpuClient.createNodeAsync(request).get(); + } + } +} +//[END tpu_vm_create] diff --git a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java index ed499d8cac5..3de8567857d 100644 --- a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java +++ b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java @@ -17,7 +17,6 @@ package tpu; //[START tpu_queued_resources_delete_force] - import com.google.api.gax.retrying.RetrySettings; import com.google.api.gax.rpc.UnknownException; import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest; diff --git a/tpu/src/main/java/tpu/DeleteTpuVm.java b/tpu/src/main/java/tpu/DeleteTpuVm.java new file mode 100644 index 00000000000..a76b1d5487c --- /dev/null +++ b/tpu/src/main/java/tpu/DeleteTpuVm.java @@ -0,0 +1,80 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +//[START tpu_vm_delete] +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.tpu.v2.DeleteNodeRequest; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.TpuClient; +import com.google.cloud.tpu.v2.TpuSettings; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import org.threeten.bp.Duration; + +public class DeleteTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "europe-west4-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + + deleteTpuVm(projectId, zone, nodeName); + } + + // Deletes a TPU VM with the specified name in the given project and zone. 
+ public static void deleteTpuVm(String projectId, String zone, String nodeName) + throws IOException, ExecutionException, InterruptedException { + // With these settings the client library handles the Operation's polling mechanism + // and prevent CancellationException error + TpuSettings.Builder clientSettings = + TpuSettings.newBuilder(); + clientSettings + .deleteNodeOperationSettings() + .setPollingAlgorithm( + OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(5000L)) + .setRetryDelayMultiplier(1.5) + .setMaxRetryDelay(Duration.ofMillis(45000L)) + .setInitialRpcTimeout(Duration.ZERO) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ZERO) + .setTotalTimeout(Duration.ofHours(24L)) + .build())); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + DeleteNodeRequest request = DeleteNodeRequest.newBuilder().setName(name).build(); + + tpuClient.deleteNodeAsync(request).get(); + System.out.println("TPU VM deleted"); + } + } +} +//[END tpu_vm_delete] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/GetTpuVm.java b/tpu/src/main/java/tpu/GetTpuVm.java new file mode 100644 index 00000000000..b1d6608b5b4 --- /dev/null +++ b/tpu/src/main/java/tpu/GetTpuVm.java @@ -0,0 +1,56 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_vm_get] +import com.google.cloud.tpu.v2.GetNodeRequest; +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; + +public class GetTpuVm { + + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "europe-west4-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + + getTpuVm(projectId, zone, nodeName); + } + + // Describes a TPU VM with the specified name in the given project and zone. + public static Node getTpuVm(String projectId, String zone, String nodeName) + throws IOException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (TpuClient tpuClient = TpuClient.create()) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + GetNodeRequest request = GetNodeRequest.newBuilder().setName(name).build(); + + return tpuClient.getNode(request); + } + } +} +//[END tpu_vm_get] diff --git a/tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java b/tpu/src/test/java/tpu/QueuedResourceIT.java similarity index 74% rename from tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java rename to tpu/src/test/java/tpu/QueuedResourceIT.java index 8e7f56fb97b..a7dbba51ff4 100644 --- a/tpu/src/test/java/tpu/CreateQueuedResourceWithNetworkIT.java +++ b/tpu/src/test/java/tpu/QueuedResourceIT.java @@ -17,15 +17,13 @@ package tpu; import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; -import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.tpu.v2alpha1.QueuedResource; -import java.io.IOException; import java.util.UUID; import java.util.concurrent.TimeUnit; import org.junit.Test; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; @@ -33,34 +31,29 @@ @RunWith(JUnit4.class) @Timeout(value = 6, unit = TimeUnit.MINUTES) -public class CreateQueuedResourceWithNetworkIT { - +public class QueuedResourceIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private static final String ZONE = "europe-west4-a"; - static String javaVersion = System.getProperty("java.version").substring(0, 2); - private static final String NODE_NAME = "test-tpu-queued-resource-network-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + private static final String NODE_NAME = "test-tpu-queued-resource-network-" + UUID.randomUUID(); private static final String TPU_TYPE = "v2-8"; private static final String TPU_SOFTWARE_VERSION = 
"tpu-vm-tf-2.14.1"; - private static final String QUEUED_RESOURCE_NAME = "queued-resource-network-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + private static final String QUEUED_RESOURCE_NAME = "queued-resource-network-" + UUID.randomUUID(); private static final String NETWORK_NAME = "default"; - @BeforeAll - public static void setUp() throws IOException { + public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } - // Cleanup existing stale resources. - Util.cleanUpExistingQueuedResources("queued-resource-network-", PROJECT_ID, ZONE); + @BeforeAll + public static void setUp() { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); } @AfterAll public static void cleanup() { DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME); - - // Test that resource is deleted - Assertions.assertThrows( - NotFoundException.class, - () -> GetQueuedResource.getQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME)); } @Test diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java new file mode 100644 index 00000000000..761c1b1c5bd --- /dev/null +++ b/tpu/src/test/java/tpu/TpuVmIT.java @@ -0,0 +1,93 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.tpu.v2.Node; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 15, unit = TimeUnit.MINUTES) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class TpuVmIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "asia-east1-c"; + private static final String NODE_NAME = "test-tpu-" + UUID.randomUUID(); + private static final String TPU_TYPE = "v2-8"; + private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.12.1"; + private static final String NODE_PATH_NAME = + String.format("projects/%s/locations/%s/nodes/%s", PROJECT_ID, ZONE, NODE_NAME); + + public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @AfterAll + public static void cleanup() throws Exception { + DeleteTpuVm.deleteTpuVm(PROJECT_ID, ZONE, NODE_NAME); + + // Test that TPUs is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> 
GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME)); + } + + @Test + @Order(1) + public void testCreateTpuVm() throws IOException, ExecutionException, InterruptedException { + + Node node = CreateTpuVm.createTpuVm( + PROJECT_ID, ZONE, NODE_NAME, TPU_TYPE, TPU_SOFTWARE_VERSION); + + assertNotNull(node); + assertThat(node.getName().equals(NODE_NAME)); + assertThat(node.getAcceleratorType().equals(TPU_TYPE)); + } + + @Test + @Order(2) + public void testGetTpuVm() throws IOException { + Node node = GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME); + + assertNotNull(node); + assertThat(node.getName()).isEqualTo(NODE_PATH_NAME); + } +} \ No newline at end of file diff --git a/tpu/src/test/java/tpu/Util.java b/tpu/src/test/java/tpu/Util.java deleted file mode 100644 index c5d4d839291..00000000000 --- a/tpu/src/test/java/tpu/Util.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package tpu; - -import com.google.cloud.tpu.v2.Node; -import com.google.cloud.tpu.v2.TpuClient; -import com.google.cloud.tpu.v2alpha1.QueuedResource; -import com.google.protobuf.Timestamp; -import java.io.IOException; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; -import java.util.concurrent.ExecutionException; - -public class Util { - private static final int DELETION_THRESHOLD_TIME_MINUTES = 30; - - // Delete TPU VMs which starts with the given prefixToDelete and - // has creation timestamp >30 minutes. - public static void cleanUpExistingQueuedResources( - String prefixToDelete, String projectId, String zone) - throws IOException { - try (com.google.cloud.tpu.v2alpha1.TpuClient tpuClient = - com.google.cloud.tpu.v2alpha1.TpuClient.create()) { - String parent = String.format("projects/%s/locations/%s", projectId, zone); - - for (QueuedResource queuedResource : tpuClient.listQueuedResources(parent).iterateAll()) { - - com.google.cloud.tpu.v2alpha1.Node node = queuedResource.getTpu().getNodeSpec(0).getNode(); - String creationTime = formatTimestamp(node.getCreateTime()); - String name = queuedResource.getName() - .substring(queuedResource.getName().lastIndexOf("/") + 1); - if (containPrefixToDeleteAndZone(queuedResource, prefixToDelete, zone) - && isCreatedBeforeThresholdTime(creationTime)) { - DeleteForceQueuedResource.deleteForceQueuedResource(projectId, zone, name); - } - } - } - } - - public static boolean containPrefixToDeleteAndZone( - Object resource, String prefixToDelete, String zone) { - boolean containPrefixAndZone = false; - try { - if (resource instanceof Node) { - containPrefixAndZone = ((Node) resource).getName().contains(prefixToDelete) - && ((Node) resource).getName().split("/")[3].contains(zone); - } - if (resource instanceof QueuedResource) { - containPrefixAndZone = ((QueuedResource) 
resource).getName().contains(prefixToDelete) - && ((QueuedResource) resource).getName().split("/")[3].contains(zone); - } - } catch (NullPointerException e) { - System.out.println("Resource not found, skipping deletion:"); - } - return containPrefixAndZone; - } - - public static boolean isCreatedBeforeThresholdTime(String timestamp) { - return OffsetDateTime.parse(timestamp).toInstant() - .isBefore(Instant.now().minus(DELETION_THRESHOLD_TIME_MINUTES, ChronoUnit.MINUTES)); - } - - private static String formatTimestamp(Timestamp timestamp) { - DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); - OffsetDateTime offsetDateTime = OffsetDateTime.ofInstant( - Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos()), - ZoneOffset.UTC); - return formatter.format(offsetDateTime); - } -} \ No newline at end of file From 2df21d1634318a0173ca6de0656555c2bd6044d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:24:58 +0100 Subject: [PATCH 10/66] feat(compute): add compute consume reservation samples (#9596) * Implemented consume reservation samples, created tests * Fixed code * Fixed code * Fixed indentation * Fixed comments * Fixed code following recommendations * Fixed return value * deleted empty line after tag * Fixed tests * Fixed tests * Fixed test * Fixed zone * Fixed zone * Cleanup reservations * Fixed lint issue * Fixed test * Changed zone for HyperdisksIT * Cleanup resources * Changed zone for HyperdisksIT * Added cleanup methods * Deleted cleanup methods * Deleted cleanup methods --- .../ConsumeAnyMatchingReservation.java | 125 ++++++++++++++ .../ConsumeSingleProjectReservation.java | 127 ++++++++++++++ .../ConsumeSpecificSharedReservation.java | 131 ++++++++++++++ .../reservation/CreateReservation.java | 40 ++--- 
.../CreateReservationForInstanceTemplate.java | 12 +- .../reservation/CreateReservationFromVm.java | 1 - .../reservation/CreateSharedReservation.java | 1 - .../reservation/DeleteReservation.java | 1 - .../compute/reservation/GetReservation.java | 1 - .../compute/reservation/ListReservations.java | 1 - .../reservation/UpdateVmsForReservation.java | 9 +- .../test/java/compute/disks/HyperdisksIT.java | 2 +- .../reservation/ConsumeReservationsIT.java | 163 ++++++++++++++++++ .../CrudOperationsReservationIT.java | 47 ++--- .../compute/reservation/ReservationIT.java | 91 ++-------- ...ava => WithoutConsumingReservationIT.java} | 8 +- 16 files changed, 608 insertions(+), 152 deletions(-) create mode 100644 compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java create mode 100644 compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java create mode 100644 compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java create mode 100644 compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java rename compute/cloud-client/src/test/java/compute/reservation/{ConsumeReservationIT.java => WithoutConsumingReservationIT.java} (95%) diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java new file mode 100644 index 00000000000..b8d1ac7f8f9 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeAnyMatchingReservation.java @@ -0,0 +1,125 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_consume_any_matching_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.ANY_RESERVATION; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class ConsumeAnyMatchingReservation { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Zone where the VM instance will be created. + String zone = "us-central1-a"; + // Name of the VM instance you want to query. + String instanceName = "YOUR_INSTANCE_NAME"; + // machineType: machine type of the VM being created. 
+ // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types + String machineTypeName = "n1-standard-4"; + // sourceImage: path to the operating system image to mount. + // * For details about images you can mount, see https://cloud.google.com/compute/docs/images + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + // diskSizeGb: storage size of the boot disk to attach to the instance. + long diskSizeGb = 10L; + // networkName: network interface to associate with the instance. + String networkName = "default"; + // Minimum CPU platform of the instances. + String minCpuPlatform = "Intel Skylake"; + + createInstanceAsync(projectId, zone, instanceName, machineTypeName, sourceImage, + diskSizeGb, networkName, minCpuPlatform); + } + + // Create a virtual machine targeted with the reserveAffinity field. + // In this consumption model, existing and new VMs automatically consume a reservation + // if their properties match the VM properties specified in the reservation. + public static Instance createInstanceAsync(String projectId, String zone, + String instanceName, String machineTypeName, String sourceImage, + long diskSizeGb, String networkName, String minCpuPlatform) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (InstancesClient instancesClient = InstancesClient.create()) { + AttachedDisk disk = + AttachedDisk.newBuilder() + .setBoot(true) + .setAutoDelete(true) + .setType(AttachedDisk.Type.PERSISTENT.toString()) + .setDeviceName("disk-1") + .setInitializeParams( + AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskSizeGb(diskSizeGb) + .build()) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName(networkName) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(ANY_RESERVATION.toString()) + .build(); + + Instance instanceResource = + Instance.newBuilder() + .setName(instanceName) + .setMachineType(machineType) + .addDisks(disk) + .addNetworkInterfaces(networkInterface) + .setMinCpuPlatform(minCpuPlatform) + .setReservationAffinity(reservationAffinity) + .build(); + + InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder() + .setProject(projectId) + .setZone(zone) + .setInstanceResource(instanceResource) + .build(); + + OperationFuture operation = instancesClient.insertAsync( + insertInstanceRequest); + + Operation response = operation.get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instancesClient.get(projectId, zone, instanceName); + } + } +} +// [END compute_consume_any_matching_reservation] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java new file mode 100644 index 00000000000..8f1118b4d1b --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSingleProjectReservation.java @@ -0,0 +1,127 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_consume_single_project_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class ConsumeSingleProjectReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the zone where the reservation is located. + String zone = "us-central1-a"; + // Name of the reservation you want to query. + String reservationName = "YOUR_RESERVATION_NAME"; + // Name of the VM instance you want to query. + String instanceName = "YOUR_INSTANCE_NAME"; + // machineType: machine type of the VM being created. 
+ // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types + String machineTypeName = "n1-standard-4"; + // sourceImage: path to the operating system image to mount. + // * For details about images you can mount, see https://cloud.google.com/compute/docs/images + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + // diskSizeGb: storage size of the boot disk to attach to the instance. + long diskSizeGb = 10L; + // networkName: network interface to associate with the instance. + String networkName = "default"; + // Minimum CPU platform of the instances. + String minCpuPlatform = "Intel Skylake"; + + createInstanceAsync(projectId, zone, instanceName, reservationName, machineTypeName, + sourceImage, diskSizeGb, networkName, minCpuPlatform); + } + + // Create a virtual machine targeted with the reserveAffinity field. + // Ensure that the VM's properties match the reservation's VM properties. + public static Instance createInstanceAsync(String projectId, String zone, String instanceName, + String reservationName, String machineTypeName, String sourceImage, long diskSizeGb, + String networkName, String minCpuPlatform) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (InstancesClient instancesClient = InstancesClient.create()) { + AttachedDisk disk = + AttachedDisk.newBuilder() + .setBoot(true) + .setAutoDelete(true) + .setType(AttachedDisk.Type.PERSISTENT.toString()) + .setDeviceName("disk-1") + .setInitializeParams( + AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskSizeGb(diskSizeGb) + .build()) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName(networkName) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(SPECIFIC_RESERVATION.toString()) + .setKey("compute.googleapis.com/reservation-name") + // Set specific reservation + .addValues(reservationName) + .build(); + + Instance instanceResource = + Instance.newBuilder() + .setName(instanceName) + .setMachineType(machineType) + .addDisks(disk) + .addNetworkInterfaces(networkInterface) + .setMinCpuPlatform(minCpuPlatform) + .setReservationAffinity(reservationAffinity) + .build(); + + InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder() + .setProject(projectId) + .setZone(zone) + .setInstanceResource(instanceResource) + .build(); + + OperationFuture operation = instancesClient.insertAsync( + insertInstanceRequest); + Operation response = operation.get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instancesClient.get(projectId, zone, instanceName); + } + } +} +// [END compute_consume_single_project_reservation] diff --git a/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java new file mode 100644 index 00000000000..acf084798bf --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/reservation/ConsumeSpecificSharedReservation.java @@ -0,0 +1,131 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +// [START compute_consume_specific_shared_reservation] +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AttachedDisk; +import com.google.cloud.compute.v1.AttachedDiskInitializeParams; +import com.google.cloud.compute.v1.InsertInstanceRequest; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.InstancesClient; +import com.google.cloud.compute.v1.NetworkInterface; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ReservationAffinity; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class ConsumeSpecificSharedReservation { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // Name of the zone the reservation is located. + String zone = "us-central1-a"; + // Name of the reservation you want to query. + String reservationName = "YOUR_RESERVATION_NAME"; + // Name of the VM instance you want to query. 
+ String instanceName = "YOUR_INSTANCE_NAME"; + // machineType: machine type of the VM being created. + // * For a list of machine types, see https://cloud.google.com/compute/docs/machine-types + String machineTypeName = "n1-standard-4"; + // sourceImage: path to the operating system image to mount. + // * For details about images you can mount, see https://cloud.google.com/compute/docs/images + String sourceImage = "projects/debian-cloud/global/images/family/debian-11"; + // diskSizeGb: storage size of the boot disk to attach to the instance. + long diskSizeGb = 10L; + // networkName: network interface to associate with the instance. + String networkName = "default"; + // Minimum CPU platform of the instances. + String minCpuPlatform = "Intel Skylake"; + + createInstanceAsync(projectId, zone, instanceName, reservationName, machineTypeName, + sourceImage, diskSizeGb, networkName, minCpuPlatform); + } + + // Create a virtual machine targeted with the reserveAffinity field. + // Ensure that the VM's properties match the reservation's VM properties. + public static Instance createInstanceAsync(String projectId, String zone, String instanceName, + String reservationName, String machineTypeName, String sourceImage, long diskSizeGb, + String networkName, String minCpuPlatform) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + String machineType = String.format("zones/%s/machineTypes/%s", zone, machineTypeName); + // To consume this reservation from any consumer projects that this reservation is shared with, + // you must also specify the owner project of the reservation - the path to the reservation. + String reservationPath = + String.format("projects/%s/reservations/%s", projectId, reservationName); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (InstancesClient instancesClient = InstancesClient.create()) { + AttachedDisk disk = + AttachedDisk.newBuilder() + .setBoot(true) + .setAutoDelete(true) + .setType(AttachedDisk.Type.PERSISTENT.toString()) + .setDeviceName("disk-1") + .setInitializeParams( + AttachedDiskInitializeParams.newBuilder() + .setSourceImage(sourceImage) + .setDiskSizeGb(diskSizeGb) + .build()) + .build(); + + NetworkInterface networkInterface = NetworkInterface.newBuilder() + .setName(networkName) + .build(); + + ReservationAffinity reservationAffinity = + ReservationAffinity.newBuilder() + .setConsumeReservationType(SPECIFIC_RESERVATION.toString()) + .setKey("compute.googleapis.com/reservation-name") + // Set specific reservation + .addValues(reservationPath) + .build(); + + Instance instanceResource = + Instance.newBuilder() + .setName(instanceName) + .setMachineType(machineType) + .addDisks(disk) + .addNetworkInterfaces(networkInterface) + .setMinCpuPlatform(minCpuPlatform) + .setReservationAffinity(reservationAffinity) + .build(); + + InsertInstanceRequest insertInstanceRequest = InsertInstanceRequest.newBuilder() + .setProject(projectId) + .setZone(zone) + .setInstanceResource(instanceResource) + .build(); + + OperationFuture operation = instancesClient.insertAsync( + insertInstanceRequest); + Operation response = operation.get(3, TimeUnit.MINUTES); + + if (response.hasError()) { + return null; + } + return instancesClient.get(projectId, zone, instanceName); + } + } +} +// [END compute_consume_specific_shared_reservation] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java index 3ea126d692a..c2f79720167 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservation.java @@ -47,29 +47,27 @@ public static void main(String[] args) } 
// Creates reservation with optional flags - public static void createReservation( + public static Reservation createReservation( String projectId, String reservationName, int numberOfVms, String zone) throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Create the reservation with optional properties: + // Machine type of the instances in the reservation. + String machineType = "n1-standard-2"; + // Number of accelerators to be attached to the instances in the reservation. + int numberOfAccelerators = 1; + // Accelerator type to be attached to the instances in the reservation. + String acceleratorType = "nvidia-tesla-t4"; + // Minimum CPU platform to be attached to the instances in the reservation. + String minCpuPlatform = "Intel Skylake"; + // Local SSD size in GB to be attached to the instances in the reservation. + int localSsdSize = 375; + // Local SSD interfaces to be attached to the instances in the reservation. + String localSsdInterface1 = "NVME"; + String localSsdInterface2 = "SCSI"; + boolean specificReservationRequired = true; // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. try (ReservationsClient reservationsClient = ReservationsClient.create()) { - - // Create the reservation with optional properties: - // Machine type of the instances in the reservation. - String machineType = "n1-standard-2"; - // Number of accelerators to be attached to the instances in the reservation. - int numberOfAccelerators = 1; - // Accelerator type to be attached to the instances in the reservation. - String acceleratorType = "nvidia-tesla-t4"; - // Minimum CPU platform to be attached to the instances in the reservation. - String minCpuPlatform = "Intel Skylake"; - // Local SSD size in GB to be attached to the instances in the reservation. - int localSsdSize = 375; - // Local SSD interfaces to be attached to the instances in the reservation. 
- String localSsdInterface1 = "NVME"; - String localSsdInterface2 = "SCSI"; - boolean specificReservationRequired = true; - Reservation reservation = Reservation.newBuilder() .setName(reservationName) @@ -105,15 +103,13 @@ public static void createReservation( .build()) .build(); - // Wait for the create reservation operation to complete. Operation response = reservationsClient.insertAsync(projectId, zone, reservation).get(7, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation creation failed!" + response); - return; + return null; } - System.out.println("Reservation created. Operation Status: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java index 69edf13ef22..fca7a3ca6d6 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationForInstanceTemplate.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_create_template] - import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.Reservation; @@ -44,7 +43,6 @@ public static void main(String[] args) // to be used for creating the reservation. String instanceTemplateUri = "projects/YOUR_PROJECT_ID/global/instanceTemplates/YOUR_INSTANCE_TEMPLATE_NAME"; - // The URI of the instance template with REGIONAL location // to be used for creating the reservation. For us-central1 region in this case. // String instanceTemplateUri = @@ -55,15 +53,13 @@ public static void main(String[] args) } // Creates a reservation in a project for the instance template. 
- public static void createReservationForInstanceTemplate( + public static Reservation createReservationForInstanceTemplate( String projectId, String reservationName, String instanceTemplateUri, int numberOfVms, String zone) throws IOException, ExecutionException, InterruptedException, TimeoutException { // Initialize client that will be used to send requests. This client only needs to be created // once, and can be reused for multiple requests. try (ReservationsClient reservationsClient = ReservationsClient.create()) { - - // Create the reservation. Reservation reservation = Reservation.newBuilder() .setName(reservationName) @@ -77,15 +73,13 @@ public static void createReservationForInstanceTemplate( .build()) .build(); - // Wait for the create reservation operation to complete. Operation response = reservationsClient.insertAsync(projectId, zone, reservation).get(3, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation creation failed!" + response); - return; + return null; } - System.out.println("Reservation created. 
Operation Status: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java index 0b004cd19c3..0a7c6bab178 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateReservationFromVm.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_create_from_vm] - import com.google.cloud.compute.v1.AcceleratorConfig; import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk; import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties; diff --git a/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java b/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java index 91eb89f4336..53052ee4d25 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/CreateSharedReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_create_shared] - import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.Reservation; diff --git a/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java b/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java index d539732049c..60671d46feb 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/DeleteReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_delete] - import 
com.google.cloud.compute.v1.DeleteReservationRequest; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.ReservationsClient; diff --git a/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java b/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java index 5e99d11c191..6c74227df4d 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/GetReservation.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_get] - import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import java.io.IOException; diff --git a/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java b/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java index 969f4d25d6b..8c907037a37 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java +++ b/compute/cloud-client/src/main/java/compute/reservation/ListReservations.java @@ -17,7 +17,6 @@ package compute.reservation; // [START compute_reservation_list] - import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import java.io.IOException; diff --git a/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java b/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java index 1d37e2c0ade..48fa92b7599 100644 --- a/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java +++ b/compute/cloud-client/src/main/java/compute/reservation/UpdateVmsForReservation.java @@ -17,8 +17,8 @@ package compute.reservation; // [START compute_reservation_vms_update] - import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.Reservation; import com.google.cloud.compute.v1.ReservationsClient; import 
com.google.cloud.compute.v1.ReservationsResizeRequest; import com.google.cloud.compute.v1.ResizeReservationRequest; @@ -45,7 +45,7 @@ public static void main(String[] args) } // Updates a reservation with new VM capacity. - public static void updateVmsForReservation( + public static Reservation updateVmsForReservation( String projectId, String zone, String reservationName, int numberOfVms) throws IOException, ExecutionException, InterruptedException, TimeoutException { // Initialize client that will be used to send requests. This client only needs to be created @@ -66,10 +66,9 @@ public static void updateVmsForReservation( .get(3, TimeUnit.MINUTES); if (response.hasError()) { - System.out.println("Reservation update failed !!" + response); - return; + return null; } - System.out.println("Reservation updated successfully: " + response.getStatus()); + return reservationsClient.get(projectId, zone, reservationName); } } } diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java index 1ec9e09aff6..4b61e5bf16d 100644 --- a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java +++ b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java @@ -44,7 +44,7 @@ @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class HyperdisksIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-central1-a"; + private static final String ZONE = "us-west1-a"; private static final String HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); private static final String HYPERDISK_IN_POOL_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); private static final String STORAGE_POOL_NAME = "test-storage-pool-enc-" + UUID.randomUUID(); diff --git a/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java 
b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java new file mode 100644 index 00000000000..96c8b22a4e0 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationsIT.java @@ -0,0 +1,163 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.reservation; + +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.ANY_RESERVATION; +import static com.google.cloud.compute.v1.ReservationAffinity.ConsumeReservationType.SPECIFIC_RESERVATION; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.cloud.compute.v1.AllocationSpecificSKUAllocationReservedInstanceProperties; +import com.google.cloud.compute.v1.AllocationSpecificSKUReservation; +import com.google.cloud.compute.v1.Instance; +import com.google.cloud.compute.v1.Reservation; +import com.google.cloud.compute.v1.ReservationsClient; +import compute.DeleteInstance; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.Assert; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 6, unit = TimeUnit.MINUTES) +public class ConsumeReservationsIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-central1-a"; + static String templateUUID = UUID.randomUUID().toString(); + private static final String RESERVATION_NAME = "test-reservaton-" + templateUUID; + private static final String INSTANCE_FOR_SPR = "test-instance-for-spr-" + templateUUID; + private static final String INSTANCE_FOR_ANY_MATCHING = "test-instance-" + templateUUID; + private static final String SPECIFIC_SHARED_INSTANCE = "test-instance-shared-" + templateUUID; + private static final String MACHINE_TYPE = "n1-standard-4"; + private static final String SOURCE_IMAGE = "projects/debian-cloud/global/images/family/debian-11"; + private static final String NETWORK_NAME = "default"; + private static final long DISK_SIZE_GB = 10L; + private static final String MIN_CPU_PLATFORM = "Intel Skylake"; + + // Check if the required environment variables are set. + public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + + ConsumeReservationsIT.createReservation( + PROJECT_ID, RESERVATION_NAME, ZONE); + } + + @AfterAll + public static void cleanup() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Delete all instances created for testing. 
+ DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_FOR_SPR); + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, INSTANCE_FOR_ANY_MATCHING); + DeleteInstance.deleteInstance(PROJECT_ID, ZONE, SPECIFIC_SHARED_INSTANCE); + + // Delete all reservations created for testing. + DeleteReservation.deleteReservation(PROJECT_ID, ZONE, RESERVATION_NAME); + + // Test that reservation is deleted + Assertions.assertThrows( + NotFoundException.class, + () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME, ZONE)); + } + + @Test + public void testConsumeAnyMatchingReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeAnyMatchingReservation + .createInstanceAsync(PROJECT_ID, ZONE, INSTANCE_FOR_ANY_MATCHING, + MACHINE_TYPE, SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + assertNotNull(instance); + Assert.assertEquals(ANY_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testConsumeSingleProjectReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeSingleProjectReservation.createInstanceAsync( + PROJECT_ID, ZONE, INSTANCE_FOR_SPR, RESERVATION_NAME, MACHINE_TYPE, + SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + assertNotNull(instance); + assertThat(instance.getReservationAffinity().getValuesList()) + .contains(RESERVATION_NAME); + Assert.assertEquals(SPECIFIC_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + @Test + public void testConsumeSpecificSharedReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + Instance instance = ConsumeSpecificSharedReservation.createInstanceAsync( + PROJECT_ID, ZONE, SPECIFIC_SHARED_INSTANCE, RESERVATION_NAME, MACHINE_TYPE, + SOURCE_IMAGE, DISK_SIZE_GB, NETWORK_NAME, MIN_CPU_PLATFORM); + + 
assertNotNull(instance); + Assert.assertTrue(instance.getReservationAffinity() + .getValuesList().get(0).contains(RESERVATION_NAME)); + Assert.assertEquals(SPECIFIC_RESERVATION.toString(), + instance.getReservationAffinity().getConsumeReservationType()); + } + + // Creates reservation with the given parameters. + public static void createReservation( + String projectId, String reservationName, String zone) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + boolean specificReservationRequired = true; + int numberOfVms = 3; + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ReservationsClient reservationsClient = ReservationsClient.create()) { + Reservation reservation = + Reservation.newBuilder() + .setName(reservationName) + .setZone(zone) + .setSpecificReservationRequired(specificReservationRequired) + .setSpecificReservation( + AllocationSpecificSKUReservation.newBuilder() + .setCount(numberOfVms) + .setInstanceProperties( + AllocationSpecificSKUAllocationReservedInstanceProperties.newBuilder() + .setMachineType(MACHINE_TYPE) + .setMinCpuPlatform(MIN_CPU_PLATFORM) + .build()) + .build()) + .build(); + + reservationsClient.insertAsync(projectId, zone, reservation).get(3, TimeUnit.MINUTES); + } + } +} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java index 5dae4a347d6..0dd9209ae5f 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java +++ b/compute/cloud-client/src/test/java/compute/reservation/CrudOperationsReservationIT.java @@ -22,10 +22,7 @@ import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.compute.v1.Reservation; -import compute.Util; -import java.io.ByteArrayOutputStream; import 
java.io.IOException; -import java.io.PrintStream; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -35,24 +32,17 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) @Timeout(value = 6, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class CrudOperationsReservationIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-central1-b"; - static String javaVersion = System.getProperty("java.version").substring(0, 2); - private static final String RESERVATION_NAME = "test-reservation-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + private static final String ZONE = "us-central1-a"; + private static final String RESERVATION_NAME = "test-reservation-" + UUID.randomUUID(); private static final int NUMBER_OF_VMS = 3; // Check if the required environment variables are set. @@ -67,14 +57,12 @@ public static void setUp() requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); requireEnvVar("GOOGLE_CLOUD_PROJECT"); - // Cleanup existing stale resources. - Util.cleanUpExistingReservations("test-reservation-" + javaVersion, PROJECT_ID, ZONE); + CreateReservation.createReservation(PROJECT_ID, RESERVATION_NAME, NUMBER_OF_VMS, ZONE); } @AfterAll public static void cleanup() throws IOException, ExecutionException, InterruptedException, TimeoutException { - // Delete resources created for testing. 
DeleteReservation.deleteReservation(PROJECT_ID, ZONE, RESERVATION_NAME); // Test that reservation is deleted @@ -84,23 +72,6 @@ public static void cleanup() } @Test - @Order(1) - public void testCreateReservation() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); - System.setOut(new PrintStream(stdOut)); - CreateReservation.createReservation( - PROJECT_ID, RESERVATION_NAME, NUMBER_OF_VMS, ZONE); - - assertThat(stdOut.toString()).contains("Reservation created. Operation Status: DONE"); - - stdOut.close(); - System.setOut(out); - } - - @Test - @Order(3) public void testGetReservation() throws IOException { Reservation reservation = GetReservation.getReservation( @@ -111,7 +82,6 @@ public void testGetReservation() } @Test - @Order(4) public void testListReservation() throws IOException { List reservations = ListReservations.listReservations(PROJECT_ID, ZONE); @@ -119,4 +89,15 @@ public void testListReservation() throws IOException { assertThat(reservations).isNotNull(); Assert.assertTrue(reservations.get(0).getName().contains("test-")); } + + @Test + public void testUpdateVmsForReservation() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + int newNumberOfVms = 1; + Reservation reservation = UpdateVmsForReservation.updateVmsForReservation( + PROJECT_ID, ZONE, RESERVATION_NAME, newNumberOfVms); + + Assert.assertNotNull(reservation); + Assert.assertEquals(newNumberOfVms, reservation.getSpecificReservation().getCount()); + } } \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java index 48e25d2dd7b..22be75ea98a 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java +++ 
b/compute/cloud-client/src/test/java/compute/reservation/ReservationIT.java @@ -18,6 +18,7 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -35,7 +36,6 @@ import compute.CreateRegionalInstanceTemplate; import compute.DeleteInstanceTemplate; import compute.DeleteRegionalInstanceTemplate; -import compute.Util; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -45,51 +45,40 @@ import java.util.concurrent.TimeoutException; import org.junit.Assert; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) @Timeout(value = 6, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class ReservationIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private static final String ZONE = "asia-south1-a"; private static final String REGION = ZONE.substring(0, ZONE.lastIndexOf('-')); - static String javaVersion = System.getProperty("java.version").substring(0, 2); - private static ReservationsClient reservationsClient; - private static final String RESERVATION_NAME_GLOBAL = "test-reservation-global-" + javaVersion - + "-" + UUID.randomUUID().toString().substring(0, 8); - private static final String RESERVATION_NAME_REGIONAL = "test-reservation-regional-" - + javaVersion + "-" + 
UUID.randomUUID().toString().substring(0, 8); + static String templateUUID = UUID.randomUUID().toString(); + private static final String RESERVATION_NAME_GLOBAL = "test-reservation-global-" + templateUUID; + private static final String RESERVATION_NAME_REGIONAL = + "test-reservation-regional-" + templateUUID; private static final String GLOBAL_INSTANCE_TEMPLATE_NAME = - "test-global-inst-temp-" + javaVersion + "-" + UUID.randomUUID().toString().substring(0, 8); - private static final String REGIONAL_INSTANCE_TEMPLATE_NAME = "test-regional-inst-temp-" - + javaVersion + "-" + UUID.randomUUID().toString().substring(0, 8); + "test-global-inst-temp-" + templateUUID; + private static final String REGIONAL_INSTANCE_TEMPLATE_NAME = + "test-regional-inst-temp-" + templateUUID; private static final String GLOBAL_INSTANCE_TEMPLATE_URI = String.format( "projects/%s/global/instanceTemplates/%s", PROJECT_ID, GLOBAL_INSTANCE_TEMPLATE_NAME); private static final String REGIONAL_INSTANCE_TEMPLATE_URI = String.format("projects/%s/regions/%s/instanceTemplates/%s", PROJECT_ID, REGION, REGIONAL_INSTANCE_TEMPLATE_NAME); private static final String SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME = - "test-shared-inst-temp-" + javaVersion + "-" - + UUID.randomUUID().toString().substring(0, 8); + "test-shared-inst-temp-" + templateUUID; private static final String INSTANCE_TEMPLATE_SHARED_RESERV_URI = String.format("projects/%s/global/instanceTemplates/%s", PROJECT_ID, SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME); - private static final String RESERVATION_NAME_SHARED = "test-reservation-shared-" + javaVersion - + "-" + UUID.randomUUID().toString().substring(0, 8); + private static final String RESERVATION_NAME_SHARED = "test-reservation-shared-" + templateUUID; private static final int NUMBER_OF_VMS = 3; - private ByteArrayOutputStream stdOut; + private static ByteArrayOutputStream stdOut; // Check if the required environment variables are set. 
public static void requireEnvVar(String envVarName) { @@ -102,22 +91,9 @@ public static void setUp() throws IOException, ExecutionException, InterruptedException, TimeoutException { requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); requireEnvVar("GOOGLE_CLOUD_PROJECT"); - final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); + stdOut = new ByteArrayOutputStream(); System.setOut(new PrintStream(stdOut)); - // Cleanup existing stale resources. - Util.cleanUpExistingInstanceTemplates("test-global-inst-temp-" + javaVersion, PROJECT_ID); - Util.cleanUpExistingRegionalInstanceTemplates( - "test-regional-inst-temp-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingReservations( - "test-reservation-global-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingReservations("test-reservation-regional-" + javaVersion, PROJECT_ID, ZONE); - Util.cleanUpExistingInstanceTemplates("test-shared-inst-temp-" + javaVersion, PROJECT_ID); - - // Initialize the client once for all tests - reservationsClient = ReservationsClient.create(); - // Create instance template with GLOBAL location. CreateInstanceTemplate.createInstanceTemplate(PROJECT_ID, GLOBAL_INSTANCE_TEMPLATE_NAME); assertThat(stdOut.toString()) @@ -129,16 +105,12 @@ public static void setUp() // Create instance template for shares reservation. CreateInstanceTemplate.createInstanceTemplate( PROJECT_ID, SPECIFIC_SHARED_INSTANCE_TEMPLATE_NAME); - - stdOut.close(); - System.setOut(out); } @AfterAll public static void cleanup() throws IOException, ExecutionException, InterruptedException, TimeoutException { final PrintStream out = System.out; - ByteArrayOutputStream stdOut = new ByteArrayOutputStream(); System.setOut(new PrintStream(stdOut)); // Delete instance template with GLOBAL location. 
@@ -173,34 +145,19 @@ public static void cleanup() NotFoundException.class, () -> GetReservation.getReservation(PROJECT_ID, RESERVATION_NAME_REGIONAL, ZONE)); - // Close the client after all tests - reservationsClient.close(); - stdOut.close(); System.setOut(out); } - @BeforeEach - public void beforeEach() { - stdOut = new ByteArrayOutputStream(); - System.setOut(new PrintStream(stdOut)); - } - - @AfterEach - public void afterEach() { - stdOut = null; - System.setOut(null); - } - @Test - @Order(1) public void testCreateReservationWithGlobalInstanceTemplate() throws IOException, ExecutionException, InterruptedException, TimeoutException { - CreateReservationForInstanceTemplate.createReservationForInstanceTemplate( + Reservation reservation = CreateReservationForInstanceTemplate + .createReservationForInstanceTemplate( PROJECT_ID, RESERVATION_NAME_GLOBAL, GLOBAL_INSTANCE_TEMPLATE_URI, NUMBER_OF_VMS, ZONE); - Reservation reservation = reservationsClient.get(PROJECT_ID, ZONE, RESERVATION_NAME_GLOBAL); + assertNotNull(reservation); Assert.assertTrue(reservation.getSpecificReservation() .getSourceInstanceTemplate().contains(GLOBAL_INSTANCE_TEMPLATE_NAME)); Assert.assertEquals(RESERVATION_NAME_GLOBAL, reservation.getName()); @@ -209,30 +166,18 @@ public void testCreateReservationWithGlobalInstanceTemplate() @Test public void testCreateReservationWithRegionInstanceTemplate() throws IOException, ExecutionException, InterruptedException, TimeoutException { - CreateReservationForInstanceTemplate.createReservationForInstanceTemplate( + Reservation reservation = CreateReservationForInstanceTemplate + .createReservationForInstanceTemplate( PROJECT_ID, RESERVATION_NAME_REGIONAL, REGIONAL_INSTANCE_TEMPLATE_URI, NUMBER_OF_VMS, ZONE); - Reservation reservation = reservationsClient.get(PROJECT_ID, ZONE, RESERVATION_NAME_REGIONAL); + assertNotNull(reservation); Assert.assertTrue(reservation.getSpecificReservation() 
.getSourceInstanceTemplate().contains(REGIONAL_INSTANCE_TEMPLATE_NAME)); Assert.assertTrue(reservation.getZone().contains(ZONE)); Assert.assertEquals(RESERVATION_NAME_REGIONAL, reservation.getName()); } - @Test - @Order(2) - public void testUpdateVmsForReservation() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - int newNumberOfVms = 5; - UpdateVmsForReservation.updateVmsForReservation( - PROJECT_ID, ZONE, RESERVATION_NAME_GLOBAL, newNumberOfVms); - Reservation reservation = GetReservation.getReservation( - PROJECT_ID, RESERVATION_NAME_GLOBAL, ZONE); - - Assert.assertEquals(newNumberOfVms, reservation.getSpecificReservation().getCount()); - } - @Test public void testCreateSharedReservation() throws ExecutionException, InterruptedException, TimeoutException { diff --git a/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java b/compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java similarity index 95% rename from compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java rename to compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java index 066eb5711ad..763b1e2df5f 100644 --- a/compute/cloud-client/src/test/java/compute/reservation/ConsumeReservationIT.java +++ b/compute/cloud-client/src/test/java/compute/reservation/WithoutConsumingReservationIT.java @@ -38,14 +38,14 @@ @RunWith(JUnit4.class) @Timeout(value = 3, unit = TimeUnit.MINUTES) -public class ConsumeReservationIT { - +public class WithoutConsumingReservationIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); private static final String ZONE = "us-central1-a"; + static String templateUUID = UUID.randomUUID().toString(); private static final String INSTANCE_NOT_CONSUME_RESERVATION_NAME = - "test-instance-not-consume-" + UUID.randomUUID().toString().substring(0, 8); + "test-instance-not-consume-" + templateUUID; private 
static final String TEMPLATE_NOT_CONSUME_RESERVATION_NAME = - "test-template-not-consume-" + UUID.randomUUID().toString().substring(0, 8); + "test-template-not-consume-" + templateUUID; private static final String MACHINE_TYPE_NAME = "n1-standard-1"; private static final String SOURCE_IMAGE = "projects/debian-cloud/global/images/family/debian-11"; private static final String NETWORK_NAME = "default"; From 4d109500e2b73ad5fb42d0b9178fa2c7546552be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:44:37 +0100 Subject: [PATCH 11/66] feat(auth): add apikeys undelete api key sample. (#9619) * Implemented apikeys_undelete_api_key sample, created test * Fixed header --- auth/src/main/java/UndeleteApiKey.java | 59 ++++++++++++++++++++++++ auth/src/test/java/ApiKeySnippetsIT.java | 12 +++-- 2 files changed, 68 insertions(+), 3 deletions(-) create mode 100644 auth/src/main/java/UndeleteApiKey.java diff --git a/auth/src/main/java/UndeleteApiKey.java b/auth/src/main/java/UndeleteApiKey.java new file mode 100644 index 00000000000..cd509c705b3 --- /dev/null +++ b/auth/src/main/java/UndeleteApiKey.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// [START apikeys_undelete_api_key] +import com.google.api.apikeys.v2.ApiKeysClient; +import com.google.api.apikeys.v2.Key; +import com.google.api.apikeys.v2.UndeleteKeyRequest; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class UndeleteApiKey { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project. + String projectId = "YOUR_PROJECT_ID"; + // The API key id to undelete. + String keyId = "YOUR_KEY_ID"; + + undeleteApiKey(projectId, keyId); + } + + // Undeletes an API key. + public static void undeleteApiKey(String projectId, String keyId) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ApiKeysClient apiKeysClient = ApiKeysClient.create()) { + + // Initialize the undelete request and set the argument. + UndeleteKeyRequest undeleteKeyRequest = UndeleteKeyRequest.newBuilder() + .setName(String.format("projects/%s/locations/global/keys/%s", projectId, keyId)) + .build(); + + // Make the request and wait for the operation to complete. 
+ Key undeletedKey = apiKeysClient.undeleteKeyAsync(undeleteKeyRequest) + .get(3, TimeUnit.MINUTES); + + System.out.printf("Successfully undeleted the API key: %s", undeletedKey.getName()); + } + } +} +// [END apikeys_undelete_api_key] \ No newline at end of file diff --git a/auth/src/test/java/ApiKeySnippetsIT.java b/auth/src/test/java/ApiKeySnippetsIT.java index 46a059d2203..7f65313d0e1 100644 --- a/auth/src/test/java/ApiKeySnippetsIT.java +++ b/auth/src/test/java/ApiKeySnippetsIT.java @@ -36,7 +36,6 @@ public class ApiKeySnippetsIT { private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String CREDENTIALS = System.getenv("GOOGLE_APPLICATION_CREDENTIALS"); private static Key API_KEY; private static String API_KEY_STRING; private ByteArrayOutputStream stdOut; @@ -79,8 +78,15 @@ public static void cleanup() String apiKeyId = getApiKeyId(API_KEY); DeleteApiKey.deleteApiKey(PROJECT_ID, apiKeyId); - String goal = String.format("Successfully deleted the API key: %s", API_KEY.getName()); - assertThat(stdOut.toString()).contains(goal); + + UndeleteApiKey.undeleteApiKey(PROJECT_ID, apiKeyId); + String undeletedKey = String.format("Successfully undeleted the API key: %s", + API_KEY.getName()); + assertThat(stdOut.toString()).contains(undeletedKey); + + DeleteApiKey.deleteApiKey(PROJECT_ID, apiKeyId); + String deletedKey = String.format("Successfully deleted the API key: %s", API_KEY.getName()); + assertThat(stdOut.toString()).contains(deletedKey); stdOut.close(); System.setOut(out); From f89f6faf150a64aacc49f9e4683c68a3c5000d7b Mon Sep 17 00:00:00 2001 From: Veronica Wasson <3992422+VeronicaWasson@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:52:57 -0800 Subject: [PATCH 12/66] docs(samples): Read multiple Kafka topics from Dataflow (#9511) --- dataflow/snippets/pom.xml | 3 +- .../com/example/dataflow/KafkaReadTopics.java | 111 ++++++++++++++++++ .../com/example/dataflow/KafkaReadIT.java | 60 +++++++--- 3 files changed, 
154 insertions(+), 20 deletions(-) create mode 100644 dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java diff --git a/dataflow/snippets/pom.xml b/dataflow/snippets/pom.xml index 76a6d12a149..c642a0b6191 100755 --- a/dataflow/snippets/pom.xml +++ b/dataflow/snippets/pom.xml @@ -163,11 +163,10 @@ ${apache_beam.version} - + org.apache.kafka kafka-clients 3.8.0 - test diff --git a/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java b/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java new file mode 100644 index 00000000000..a9d12f40fc3 --- /dev/null +++ b/dataflow/snippets/src/main/java/com/example/dataflow/KafkaReadTopics.java @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.dataflow; + +// [START dataflow_kafka_read_multi_topic] +import java.util.List; +import org.apache.beam.sdk.Pipeline; +import org.apache.beam.sdk.PipelineResult; +import org.apache.beam.sdk.io.TextIO; +import org.apache.beam.sdk.io.kafka.KafkaIO; +import org.apache.beam.sdk.options.Description; +import org.apache.beam.sdk.options.PipelineOptionsFactory; +import org.apache.beam.sdk.options.StreamingOptions; +import org.apache.beam.sdk.transforms.Filter; +import org.apache.beam.sdk.transforms.MapElements; +import org.apache.beam.sdk.values.TypeDescriptors; +import org.apache.kafka.common.serialization.LongDeserializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.joda.time.Duration; +import org.joda.time.Instant; + +public class KafkaReadTopics { + + // [END dataflow_kafka_read_multi_topic] + public interface Options extends StreamingOptions { + @Description("The Kafka bootstrap server. Example: localhost:9092") + String getBootstrapServer(); + + void setBootstrapServer(String value); + + @Description("The first Kafka topic to read from.") + String getTopic1(); + + void setTopic1(String value); + + @Description("The second Kafka topic to read from.") + String getTopic2(); + + void setTopic2(String value); + } + + public static PipelineResult.State main(String[] args) { + // Parse the pipeline options passed into the application. 
Example: + // --bootstrap_servers=$BOOTSTRAP_SERVERS --topic=$KAFKA_TOPIC --outputPath=$OUTPUT_FILE + // For more information, see https://beam.apache.org/documentation/programming-guide/#configuring-pipeline-options + var options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class); + options.setStreaming(true); + + Pipeline pipeline = createPipeline(options); + return pipeline.run().waitUntilFinish(); + } + + // [START dataflow_kafka_read_multi_topic] + public static Pipeline createPipeline(Options options) { + String topic1 = options.getTopic1(); + String topic2 = options.getTopic2(); + + // Build the pipeline. + var pipeline = Pipeline.create(options); + var allTopics = pipeline + .apply(KafkaIO.read() + .withTopics(List.of(topic1, topic2)) + .withBootstrapServers(options.getBootstrapServer()) + .withKeyDeserializer(LongDeserializer.class) + .withValueDeserializer(StringDeserializer.class) + .withMaxReadTime(Duration.standardSeconds(10)) + .withStartReadTime(Instant.EPOCH) + ); + + // Create separate pipeline branches for each topic. + // The first branch filters on topic1. + allTopics + .apply(Filter.by(record -> record.getTopic().equals(topic1))) + .apply(MapElements + .into(TypeDescriptors.strings()) + .via(record -> record.getKV().getValue())) + .apply(TextIO.write() + .to(topic1) + .withSuffix(".txt") + .withNumShards(1) + ); + + // The second branch filters on topic2. 
+ allTopics + .apply(Filter.by(record -> record.getTopic().equals(topic2))) + .apply(MapElements + .into(TypeDescriptors.strings()) + .via(record -> record.getKV().getValue())) + .apply(TextIO.write() + .to(topic2) + .withSuffix(".txt") + .withNumShards(1) + ); + return pipeline; + } +} +// [END dataflow_kafka_read_multi_topic] diff --git a/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java b/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java index b5b1abba076..2c47dae1105 100644 --- a/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java +++ b/dataflow/snippets/src/test/java/com/example/dataflow/KafkaReadIT.java @@ -39,10 +39,13 @@ import org.testcontainers.utility.DockerImageName; public class KafkaReadIT { - private static final String TOPIC_NAME = "topic-" + UUID.randomUUID(); + private static final String[] TOPIC_NAMES = { + "topic-" + UUID.randomUUID(), + "topic-" + UUID.randomUUID() + }; - private static final String OUTPUT_FILE_NAME_PREFIX = UUID.randomUUID().toString(); - private static final String OUTPUT_FILE_NAME = OUTPUT_FILE_NAME_PREFIX + "-00000-of-00001.txt"; + // The TextIO connector appends this suffix to the pipeline output file. + private static final String OUTPUT_FILE_SUFFIX = "-00000-of-00001.txt"; private static KafkaContainer kafka; private static String bootstrapServer; @@ -54,26 +57,32 @@ public void setUp() throws ExecutionException, InterruptedException { kafka.start(); bootstrapServer = kafka.getBootstrapServers(); - // Create a topic. + // Create topics. 
Properties properties = new Properties(); properties.put("bootstrap.servers", bootstrapServer); AdminClient adminClient = AdminClient.create(properties); - var topic = new NewTopic(TOPIC_NAME, 1, (short) 1); - adminClient.createTopics(Arrays.asList(topic)); + for (String topicName : TOPIC_NAMES) { + var topic = new NewTopic(topicName, 1, (short) 1); + adminClient.createTopics(Arrays.asList(topic)); + } - // Send a message to the topic. - properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); + // Send messages to the topics. + properties.put("key.serializer", "org.apache.kafka.common.serialization.LongSerializer"); properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); - KafkaProducer producer = new KafkaProducer<>(properties); - ProducerRecord record = new ProducerRecord<>(TOPIC_NAME, "key-0", "event-0"); - Future future = producer.send(record); - future.get(); + KafkaProducer producer = new KafkaProducer<>(properties); + for (String topicName : TOPIC_NAMES) { + var record = new ProducerRecord<>(topicName, 0L, topicName + "-event-0"); + Future future = producer.send(record); + future.get(); + } } @After public void tearDown() throws IOException { kafka.stop(); - Files.deleteIfExists(Paths.get(OUTPUT_FILE_NAME)); + for (String topicName : TOPIC_NAMES) { + Files.deleteIfExists(Paths.get(topicName + OUTPUT_FILE_SUFFIX)); + } } @Test @@ -81,13 +90,28 @@ public void testApacheKafkaRead() throws IOException { PipelineResult.State state = KafkaRead.main(new String[] { "--runner=DirectRunner", "--bootstrapServer=" + bootstrapServer, - "--topic=" + TOPIC_NAME, - "--outputPath=" + OUTPUT_FILE_NAME_PREFIX + "--topic=" + TOPIC_NAMES[0], + "--outputPath=" + TOPIC_NAMES[0] // Use the topic name as the output file name. 
}); assertEquals(PipelineResult.State.DONE, state); + verifyOutput(TOPIC_NAMES[0]); + } + + @Test + public void testApacheKafkaReadTopics() throws IOException { + PipelineResult.State state = KafkaReadTopics.main(new String[] { + "--runner=DirectRunner", + "--bootstrapServer=" + bootstrapServer, + "--topic1=" + TOPIC_NAMES[0], + "--topic2=" + TOPIC_NAMES[1] + }); + assertEquals(PipelineResult.State.DONE, state); + verifyOutput(TOPIC_NAMES[0]); + verifyOutput(TOPIC_NAMES[1]); + } - // Verify the pipeline wrote the output. - String output = Files.readString(Paths.get(OUTPUT_FILE_NAME)); - assertTrue(output.contains("event-0")); + private void verifyOutput(String topic) throws IOException { + String output = Files.readString(Paths.get(topic + OUTPUT_FILE_SUFFIX)); + assertTrue(output.contains(topic + "-event-0")); } } From 99e983e92323026a3a3c8551243abdbb3df204dd Mon Sep 17 00:00:00 2001 From: lovenishs04 Date: Tue, 12 Nov 2024 17:04:13 +0000 Subject: [PATCH 13/66] feat(securitycenter): Add Resource SCC Management API Org SHA Custom Module code samples (Create, Delete, List, Get) (#9598) * sample code for CreateSecurityHealthAnalyticsCustomModule * sample code for DeleteSecurityHealthAnalyticsCustomModule * sample code for ListSecurityHealthAnalyticsCustomModules * sample code for GetSecurityHealthAnalyticsCustomModule * refactor region tag --- security-command-center/snippets/pom.xml | 21 +++ ...teSecurityHealthAnalyticsCustomModule.java | 101 +++++++++++ ...teSecurityHealthAnalyticsCustomModule.java | 55 ++++++ ...etSecurityHealthAnalyticsCustomModule.java | 58 ++++++ ...tSecurityHealthAnalyticsCustomModules.java | 52 ++++++ ...curityHealthAnalyticsCustomModuleTest.java | 165 ++++++++++++++++++ 6 files changed, 452 insertions(+) create mode 100644 security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java create mode 100644 
security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java create mode 100644 security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java create mode 100644 security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java create mode 100644 security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java diff --git a/security-command-center/snippets/pom.xml b/security-command-center/snippets/pom.xml index dfc9f9bbe36..0c12cf541cd 100644 --- a/security-command-center/snippets/pom.xml +++ b/security-command-center/snippets/pom.xml @@ -45,6 +45,18 @@ 2.45.0 + + com.google.cloud + google-cloud-securitycentermanagement + 0.20.0 + + + + com.google.api.grpc + proto-google-cloud-securitycentermanagement-v1 + 0.20.0 + + com.google.cloud google-cloud-pubsub @@ -80,4 +92,13 @@ test + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.2.5 + + + diff --git a/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..403670363c4 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,101 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package management.api; + +// [START securitycenter_create_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.CreateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.CustomConfig; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.ResourceSelector; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.Severity; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.type.Expr; +import java.io.IOException; + +public class CreateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/create + // replace "project_id" with a real project ID + String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + + String customModuleDisplayName = "custom_module_display_name"; + + createSecurityHealthAnalyticsCustomModule(parent, customModuleDisplayName); + } + + public static SecurityHealthAnalyticsCustomModule createSecurityHealthAnalyticsCustomModule( + String parent, String customModuleDisplayName) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, "custom_module"); + + // define the CEL expression here, change it according to the your requirements + Expr expr = + Expr.newBuilder() + .setExpression( + "has(resource.rotationPeriod) && (resource.rotationPeriod > " + + "duration('2592000s'))") + .build(); + + // define the resource selector + ResourceSelector resourceSelector = + ResourceSelector.newBuilder() + .addResourceTypes("cloudkms.googleapis.com/CryptoKey") + .build(); + + // define the custom module configuration, update the severity, description, + // recommendation below + CustomConfig customConfig = + CustomConfig.newBuilder() + .setPredicate(expr) + .setResourceSelector(resourceSelector) + .setSeverity(Severity.MEDIUM) + .setDescription("add your description here") + .setRecommendation("add your recommendation here") + .build(); + + // define the security health analytics custom module configuration, update the + // EnablementState below + SecurityHealthAnalyticsCustomModule securityHealthAnalyticsCustomModule = + SecurityHealthAnalyticsCustomModule.newBuilder() + .setName(name) + .setDisplayName(customModuleDisplayName) + .setEnablementState(EnablementState.ENABLED) + .setCustomConfig(customConfig) + .build(); + + CreateSecurityHealthAnalyticsCustomModuleRequest request = + CreateSecurityHealthAnalyticsCustomModuleRequest.newBuilder() + .setParent(parent) + .setSecurityHealthAnalyticsCustomModule(securityHealthAnalyticsCustomModule) + .build(); + + SecurityHealthAnalyticsCustomModule response = + client.createSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_create_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java 
b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..071ea1bfb58 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package management.api; + +// [START securitycenter_delete_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.DeleteSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import java.io.IOException; + +public class DeleteSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/delete + // replace "project_id" with a real project ID + String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + + String customModuleId = "custom_module_id"; + + deleteSecurityHealthAnalyticsCustomModule(parent, customModuleId); + } + + public static boolean deleteSecurityHealthAnalyticsCustomModule( + String parent, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. 
This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + String name = + String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, customModuleId); + + DeleteSecurityHealthAnalyticsCustomModuleRequest request = + DeleteSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + client.deleteSecurityHealthAnalyticsCustomModule(request); + + return true; + } + } +} +// [END securitycenter_delete_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..0270d9e1307 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,58 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_get_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.GetSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import java.io.IOException; + +public class GetSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/get + // replace "project_id" with a real project ID + String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + + String customModuleId = "custom_module_id"; + + getSecurityHealthAnalyticsCustomModule(parent, customModuleId); + } + + public static SecurityHealthAnalyticsCustomModule getSecurityHealthAnalyticsCustomModule( + String parent, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, customModuleId); + + GetSecurityHealthAnalyticsCustomModuleRequest request = + GetSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + SecurityHealthAnalyticsCustomModule response = + client.getSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_get_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..cae1e227665 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/list + // replace "project_id" with a real project ID + String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + + listSecurityHealthAnalyticsCustomModules(parent); + } + + public static ListSecurityHealthAnalyticsCustomModulesPagedResponse + listSecurityHealthAnalyticsCustomModules(String parent) throws IOException { + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListSecurityHealthAnalyticsCustomModulesRequest request = + ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder().setParent(parent).build(); + + ListSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java new file mode 100644 index 00000000000..d35cc085be6 --- /dev/null +++ b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java @@ -0,0 +1,165 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.securitycentermanagement.v1.ListSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListSecurityHealthAnalyticsCustomModulesPagedResponse; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.testing.junit4.MultipleAttemptsRule; +import com.google.common.base.Strings; +import java.io.IOException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class SecurityHealthAnalyticsCustomModuleTest { + + private static final String parent = + String.format("organizations/%s/locations/%s", System.getenv("SCC_PROJECT_ORG_ID"), "global"); + private static final String CUSTOM_MODULE_DISPLAY_NAME = "java_sample_custom_module_test"; + private static final int MAX_ATTEMPT_COUNT = 3; + private static final int INITIAL_BACKOFF_MILLIS = 120000; // 2 minutes + + @Rule + public final MultipleAttemptsRule multipleAttemptsRule = + new MultipleAttemptsRule(MAX_ATTEMPT_COUNT, INITIAL_BACKOFF_MILLIS); + + // Check if the required environment variables are set. 
+ public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)) + .isNotEmpty(); + } + + @BeforeClass + public static void setUp() throws InterruptedException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("SCC_PROJECT_ORG_ID"); + } + + @AfterClass + public static void cleanUp() throws IOException { + // Perform cleanup after running tests + cleanupExistingCustomModules(); + } + + // cleanupExistingCustomModules clean up all the existing custom module + private static void cleanupExistingCustomModules() throws IOException { + + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListSecurityHealthAnalyticsCustomModulesRequest request = + ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder().setParent(parent).build(); + + ListSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listSecurityHealthAnalyticsCustomModules(request); + + // Iterate over the response and delete custom module one by one which start with + // java_sample_custom_module + for (SecurityHealthAnalyticsCustomModule module : response.iterateAll()) { + if (module.getDisplayName().startsWith("java_sample_custom_module")) { + String customModuleId = extractCustomModuleId(module.getName()); + deleteCustomModule(parent, customModuleId); + } + } + } + } + + // extractCustomModuleID extracts the custom module Id from the full name + private static String extractCustomModuleId(String customModuleFullName) { + if (!Strings.isNullOrEmpty(customModuleFullName)) { + Pattern pattern = Pattern.compile(".*/([^/]+)$"); + Matcher matcher = pattern.matcher(customModuleFullName); + if (matcher.find()) { + return matcher.group(1); + } + } + return ""; + } + + // createCustomModule method is for creating the custom module + private static SecurityHealthAnalyticsCustomModule createCustomModule( + String parent, String 
customModuleDisplayName) throws IOException { + if (!Strings.isNullOrEmpty(parent) && !Strings.isNullOrEmpty(customModuleDisplayName)) { + SecurityHealthAnalyticsCustomModule response = + CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule( + parent, customModuleDisplayName); + return response; + } + return null; + } + + // deleteCustomModule method is for deleting the custom module + private static void deleteCustomModule(String parent, String customModuleId) throws IOException { + if (!Strings.isNullOrEmpty(parent) && !Strings.isNullOrEmpty(customModuleId)) { + DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule( + parent, customModuleId); + } + } + + @Test + public void testCreateSecurityHealthAnalyticsCustomModule() throws IOException { + SecurityHealthAnalyticsCustomModule response = + CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule( + parent, CUSTOM_MODULE_DISPLAY_NAME); + + assertNotNull(response); + assertThat(response.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME); + } + + @Test + public void testDeleteSecurityHealthAnalyticsCustomModule() throws IOException { + SecurityHealthAnalyticsCustomModule response = + createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); + String customModuleId = extractCustomModuleId(response.getName()); + assertTrue( + DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule( + parent, customModuleId)); + } + + @Test + public void testListSecurityHealthAnalyticsCustomModules() throws IOException { + createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); + assertNotNull( + ListSecurityHealthAnalyticsCustomModules.listSecurityHealthAnalyticsCustomModules(parent)); + } + + @Test + public void testGetSecurityHealthAnalyticsCustomModule() throws IOException { + SecurityHealthAnalyticsCustomModule createCustomModuleResponse = + createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); + String 
customModuleId = extractCustomModuleId(createCustomModuleResponse.getName()); + SecurityHealthAnalyticsCustomModule getCustomModuleResponse = + GetSecurityHealthAnalyticsCustomModule.getSecurityHealthAnalyticsCustomModule( + parent, customModuleId); + + assertThat(getCustomModuleResponse.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME); + assertThat(extractCustomModuleId(getCustomModuleResponse.getName())).isEqualTo(customModuleId); + } +} From 372f0b51ad7246ea884b8018e3bdc320789457fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Fri, 15 Nov 2024 20:18:50 +0100 Subject: [PATCH 14/66] fix(tpu): improving tpu tests (#9679) * Changed tests * Created separated test for CreateTpuVm * Renamed file * Fixed imports * Fixed tests as requested in comments * Deleted redundant dependency * Fixed indentation * Fixed tests --- tpu/src/test/java/tpu/CreateTpuIT.java | 70 ++++++++++++ tpu/src/test/java/tpu/QueuedResourceIT.java | 111 +++++++++++++++----- tpu/src/test/java/tpu/TpuVmIT.java | 95 ++++++++--------- 3 files changed, 201 insertions(+), 75 deletions(-) create mode 100644 tpu/src/test/java/tpu/CreateTpuIT.java diff --git a/tpu/src/test/java/tpu/CreateTpuIT.java b/tpu/src/test/java/tpu/CreateTpuIT.java new file mode 100644 index 00000000000..cdb11831364 --- /dev/null +++ b/tpu/src/test/java/tpu/CreateTpuIT.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.tpu.v2.CreateNodeRequest; +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.TpuClient; +import com.google.cloud.tpu.v2.TpuSettings; +import org.junit.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.MockedStatic; + +@RunWith(JUnit4.class) +@Timeout(value = 3) +public class CreateTpuIT { + private static final String PROJECT_ID = "project-id"; + private static final String ZONE = "asia-east1-c"; + private static final String NODE_NAME = "test-tpu"; + private static final String TPU_TYPE = "v2-8"; + private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.12.1"; + + @Test + public void testCreateTpuVm() throws Exception { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + Node mockNode = mock(Node.class); + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) + .thenReturn(mockTpuClient); + when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockNode); + + Node returnedNode = CreateTpuVm.createTpuVm( + PROJECT_ID, ZONE, NODE_NAME, + TPU_TYPE, TPU_SOFTWARE_VERSION); + + verify(mockTpuClient, times(1)) + .createNodeAsync(any(CreateNodeRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedNode, mockNode); + } + } 
+} diff --git a/tpu/src/test/java/tpu/QueuedResourceIT.java b/tpu/src/test/java/tpu/QueuedResourceIT.java index a7dbba51ff4..906d0e270df 100644 --- a/tpu/src/test/java/tpu/QueuedResourceIT.java +++ b/tpu/src/test/java/tpu/QueuedResourceIT.java @@ -17,56 +17,111 @@ package tpu; import static com.google.common.truth.Truth.assertThat; -import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.GetQueuedResourceRequest; import com.google.cloud.tpu.v2alpha1.QueuedResource; -import java.util.UUID; -import java.util.concurrent.TimeUnit; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import com.google.cloud.tpu.v2alpha1.TpuSettings; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import org.junit.Before; import org.junit.Test; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; +import org.mockito.MockedStatic; @RunWith(JUnit4.class) -@Timeout(value = 6, unit = TimeUnit.MINUTES) +@Timeout(value = 3) public class QueuedResourceIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String PROJECT_ID = "project-id"; private static final String ZONE = "europe-west4-a"; - private static final String NODE_NAME = "test-tpu-queued-resource-network-" + UUID.randomUUID(); + private static final String NODE_NAME = "test-tpu"; 
private static final String TPU_TYPE = "v2-8"; private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1"; - private static final String QUEUED_RESOURCE_NAME = "queued-resource-network-" + UUID.randomUUID(); + private static final String QUEUED_RESOURCE_NAME = "queued-resource"; private static final String NETWORK_NAME = "default"; + private ByteArrayOutputStream bout; - public static void requireEnvVar(String envVarName) { - assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) - .that(System.getenv(envVarName)).isNotEmpty(); + @Before + public void setUp() { + bout = new ByteArrayOutputStream(); + System.setOut(new PrintStream(bout)); } - @BeforeAll - public static void setUp() { - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - requireEnvVar("GOOGLE_CLOUD_PROJECT"); + @Test + public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + QueuedResource mockQueuedResource = mock(QueuedResource.class); + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) + .thenReturn(mockTpuClient); + when(mockTpuClient.createQueuedResourceAsync(any(CreateQueuedResourceRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockQueuedResource); + + QueuedResource returnedQueuedResource = + CreateQueuedResourceWithNetwork.createQueuedResourceWithNetwork( + PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, + TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME); + + verify(mockTpuClient, times(1)) + .createQueuedResourceAsync(any(CreateQueuedResourceRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedQueuedResource, mockQueuedResource); + } } - @AfterAll - public static void cleanup() { - DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME); + 
@Test + public void testGetQueuedResource() throws IOException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + TpuClient mockClient = mock(TpuClient.class); + GetQueuedResource mockGetQueuedResource = mock(GetQueuedResource.class); + QueuedResource mockQueuedResource = mock(QueuedResource.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); + when(mockClient.getQueuedResource(any(GetQueuedResourceRequest.class))) + .thenReturn(mockQueuedResource); + + QueuedResource returnedQueuedResource = + GetQueuedResource.getQueuedResource(PROJECT_ID, ZONE, NODE_NAME); + + verify(mockGetQueuedResource, times(1)) + .getQueuedResource(PROJECT_ID, ZONE, NODE_NAME); + assertEquals(returnedQueuedResource, mockQueuedResource); + } } @Test - public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { + public void testDeleteTpuVm() { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) + .thenReturn(mockTpuClient); + when(mockTpuClient.deleteQueuedResourceAsync(any(DeleteQueuedResourceRequest.class))) + .thenReturn(mockFuture); - QueuedResource queuedResource = CreateQueuedResourceWithNetwork.createQueuedResourceWithNetwork( - PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, - TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME); + DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME); + String output = bout.toString(); - assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getName()).isEqualTo(NODE_NAME); - assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getNetworkConfig().getNetwork() - .contains(NETWORK_NAME)); - assertThat(queuedResource.getTpu().getNodeSpec(0).getNode().getNetworkConfig().getSubnetwork() - .contains(NETWORK_NAME)); + 
assertThat(output).contains("Deleted Queued Resource:"); + verify(mockTpuClient, times(1)) + .deleteQueuedResourceAsync(any(DeleteQueuedResourceRequest.class)); + } } } \ No newline at end of file diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java index 761c1b1c5bd..08dfeca8eb9 100644 --- a/tpu/src/test/java/tpu/TpuVmIT.java +++ b/tpu/src/test/java/tpu/TpuVmIT.java @@ -17,77 +17,78 @@ package tpu; import static com.google.common.truth.Truth.assertThat; -import static com.google.common.truth.Truth.assertWithMessage; -import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; -import com.google.api.gax.rpc.NotFoundException; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.tpu.v2.DeleteNodeRequest; +import com.google.cloud.tpu.v2.GetNodeRequest; import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.TpuClient; +import com.google.cloud.tpu.v2.TpuSettings; +import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.util.UUID; +import java.io.PrintStream; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; +import org.mockito.MockedStatic; @RunWith(JUnit4.class) -@Timeout(value = 15, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +@Timeout(value = 3) public class TpuVmIT { - private 
static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String PROJECT_ID = "project-id"; private static final String ZONE = "asia-east1-c"; - private static final String NODE_NAME = "test-tpu-" + UUID.randomUUID(); - private static final String TPU_TYPE = "v2-8"; - private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.12.1"; - private static final String NODE_PATH_NAME = - String.format("projects/%s/locations/%s/nodes/%s", PROJECT_ID, ZONE, NODE_NAME); - - public static void requireEnvVar(String envVarName) { - assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) - .that(System.getenv(envVarName)).isNotEmpty(); - } + private static final String NODE_NAME = "test-tpu"; + private static ByteArrayOutputStream bout; @BeforeAll public static void setUp() { - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - requireEnvVar("GOOGLE_CLOUD_PROJECT"); - } - - @AfterAll - public static void cleanup() throws Exception { - DeleteTpuVm.deleteTpuVm(PROJECT_ID, ZONE, NODE_NAME); - - // Test that TPUs is deleted - Assertions.assertThrows( - NotFoundException.class, - () -> GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME)); + bout = new ByteArrayOutputStream(); + System.setOut(new PrintStream(bout)); } @Test - @Order(1) - public void testCreateTpuVm() throws IOException, ExecutionException, InterruptedException { + public void testGetTpuVm() throws IOException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + Node mockNode = mock(Node.class); + TpuClient mockClient = mock(TpuClient.class); + GetTpuVm mockGetTpuVm = mock(GetTpuVm.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); + when(mockClient.getNode(any(GetNodeRequest.class))).thenReturn(mockNode); - Node node = CreateTpuVm.createTpuVm( - PROJECT_ID, ZONE, NODE_NAME, TPU_TYPE, TPU_SOFTWARE_VERSION); + Node returnedNode = GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME); - assertNotNull(node); - 
assertThat(node.getName().equals(NODE_NAME)); - assertThat(node.getAcceleratorType().equals(TPU_TYPE)); + verify(mockGetTpuVm, times(1)) + .getTpuVm(PROJECT_ID, ZONE, NODE_NAME); + assertThat(returnedNode).isEqualTo(mockNode); + } } @Test - @Order(2) - public void testGetTpuVm() throws IOException { - Node node = GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME); + public void testDeleteTpuVm() throws IOException, ExecutionException, InterruptedException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) + .thenReturn(mockTpuClient); + when(mockTpuClient.deleteNodeAsync(any(DeleteNodeRequest.class))) + .thenReturn(mockFuture); + + DeleteTpuVm.deleteTpuVm(PROJECT_ID, ZONE, NODE_NAME); + String output = bout.toString(); - assertNotNull(node); - assertThat(node.getName()).isEqualTo(NODE_PATH_NAME); + assertThat(output).contains("TPU VM deleted"); + verify(mockTpuClient, times(1)).deleteNodeAsync(any(DeleteNodeRequest.class)); + } } } \ No newline at end of file From 15a748a8cb61d04a0dded5d611d5a3f3b1f3eadc Mon Sep 17 00:00:00 2001 From: Jacek Spalinski <69755075+jacspa96@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:21:28 +0100 Subject: [PATCH 15/66] feat(dataplex): add code samples for Search Entries (#9609) * feat(dataplex): add sample for search Entries * feat(dataplex): add integration tests for search Entries * feat(dataplex): make searchEntries return SearchEntriesResponse to support paging * feat(dataplex): adjust integration test based on code review * feat(dataplex): adjust comment regarding search scope * feat(dataplex): remove paging from Search --------- Co-authored-by: Jacek Spalinski --- .../src/main/java/dataplex/SearchEntries.java | 65 +++++++++++++++++ .../test/java/dataplex/SearchEntriesIT.java | 71 +++++++++++++++++++ 2 files changed, 136 
insertions(+) create mode 100644 dataplex/snippets/src/main/java/dataplex/SearchEntries.java create mode 100644 dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java diff --git a/dataplex/snippets/src/main/java/dataplex/SearchEntries.java b/dataplex/snippets/src/main/java/dataplex/SearchEntries.java new file mode 100644 index 00000000000..25706176380 --- /dev/null +++ b/dataplex/snippets/src/main/java/dataplex/SearchEntries.java @@ -0,0 +1,65 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package dataplex; + +// [START dataplex_search_entries] +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import com.google.cloud.dataplex.v1.Entry; +import com.google.cloud.dataplex.v1.SearchEntriesRequest; +import com.google.cloud.dataplex.v1.SearchEntriesResult; +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +public class SearchEntries { + + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. 
+ String projectId = "MY_PROJECT_ID"; + // How to write query for search: https://cloud.google.com/dataplex/docs/search-syntax + String query = "MY_QUERY"; + + List entries = searchEntries(projectId, query); + entries.forEach(entry -> System.out.println("Entry name found in search: " + entry.getName())); + } + + // Method to search Entries located in projectId and matching query + public static List searchEntries(String projectId, String query) throws IOException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (CatalogServiceClient client = CatalogServiceClient.create()) { + SearchEntriesRequest searchEntriesRequest = + SearchEntriesRequest.newBuilder() + .setPageSize(100) + // Required field, will by default limit search scope to organization under which the + // project is located + .setName(String.format("projects/%s/locations/global", projectId)) + // Optional field, will further limit search scope only to specified project + .setScope(String.format("projects/%s", projectId)) + .setQuery(query) + .build(); + + CatalogServiceClient.SearchEntriesPagedResponse searchEntriesResponse = + client.searchEntries(searchEntriesRequest); + return searchEntriesResponse.getPage().getResponse().getResultsList().stream() + // Extract Entries nested inside search results + .map(SearchEntriesResult::getDataplexEntry) + .collect(Collectors.toList()); + } + } +} +// [END dataplex_search_entries] diff --git a/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java b/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java new file mode 100644 index 00000000000..2a1d7636dd5 --- /dev/null +++ b/dataplex/snippets/src/test/java/dataplex/SearchEntriesIT.java @@ -0,0 +1,71 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package dataplex; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.dataplex.v1.Entry; +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class SearchEntriesIT { + private static final String ID = UUID.randomUUID().toString().substring(0, 8); + private static final String LOCATION = "us-central1"; + private static final String entryGroupId = "test-entry-group-" + ID; + private static final String entryId = "test-entry-" + ID; + private static final String expectedEntry = + String.format("locations/%s/entryGroups/%s/entries/%s", LOCATION, entryGroupId, entryId); + + private static final String PROJECT_ID = requireProjectIdEnvVar(); + + private static String requireProjectIdEnvVar() { + String value = System.getenv("GOOGLE_CLOUD_PROJECT"); + assertNotNull( + "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.", value); + return value; + } + + @BeforeClass + public static void setUp() throws Exception { + requireProjectIdEnvVar(); + CreateEntryGroup.createEntryGroup(PROJECT_ID, LOCATION, entryGroupId); + CreateEntry.createEntry(PROJECT_ID, LOCATION, entryGroupId, entryId); + Thread.sleep(30000); + } + + @Test + public void testSearchEntries() throws IOException { + String query = "name:test-entry- AND description:description AND aspect:generic"; + List entries = SearchEntries.searchEntries(PROJECT_ID, query); 
+ assertThat( + entries.stream() + .map(Entry::getName) + .map(entryName -> entryName.substring(entryName.indexOf("location")))) + .contains(expectedEntry); + } + + @AfterClass + public static void tearDown() throws Exception { + // Entry inside this Entry Group will be deleted automatically + DeleteEntryGroup.deleteEntryGroup(PROJECT_ID, LOCATION, entryGroupId); + } +} From 7f84b302e5eccc354941318bf134fbfa6955bff4 Mon Sep 17 00:00:00 2001 From: James Ma Date: Tue, 19 Nov 2024 15:39:06 -0800 Subject: [PATCH 16/66] Update README.md to reflect CRf rebrand (#9685) --- functions/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/functions/README.md b/functions/README.md index f093b1fa13a..5d5d6086a7b 100644 --- a/functions/README.md +++ b/functions/README.md @@ -2,25 +2,25 @@ # Google Cloud Functions Java Samples -[Cloud Functions][functions_docs] is a lightweight, event-based, asynchronous -compute solution that allows you to create small, single-purpose functions that -respond to Cloud events without the need to manage a server or a runtime -environment. +[Cloud Run functions](https://cloud.google.com/functions/docs/concepts/overview) is a lightweight, event-based, asynchronous compute solution that allows you to create small, single-purpose functions that respond to Cloud events without the need to manage a server or a runtime environment. -[functions_docs]: https://cloud.google.com/functions/docs/ +There are two versions of Cloud Run functions: + +* **Cloud Run functions**, formerly known as Cloud Functions (2nd gen), which deploys your function as services on Cloud Run, allowing you to trigger them using Eventarc and Pub/Sub. Cloud Run functions are created using `gcloud functions` or `gcloud run`. Samples for Cloud Run functions can be found in the [`functions/v2`](v2/) folder. 
+* **Cloud Run functions (1st gen)**, formerly known as Cloud Functions (1st gen), the original version of functions with limited event triggers and configurability. Cloud Run functions (1st gen) are created using `gcloud functions --no-gen2`. Samples for Cloud Run functions (1st generation) can be found in the current `functions/` folder. ## Samples * [Hello World](helloworld/) -* [Concepts](concepts/) +* [Concepts](v2/concepts/) * [Datastore](v2/datastore/) * [Firebase](firebase/) -* [Cloud Pub/Sub](pubsub/) +* [Cloud Pub/Sub](v2/pubsub/) * [HTTP](http/) * [Logging & Monitoring](logging/) * [Slack](slack/) -* [OCR tutorial](ocr/) -* [ImageMagick](imagemagick/) +* [OCR tutorial](v2/ocr/) +* [ImageMagick](v2/imagemagick/) * [CI/CD setup](ci_cd/) ## Running Functions Locally From bcecf8f697edc4f564636d92c2d6446897277828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:11:53 +0100 Subject: [PATCH 17/66] feat(tpu): add tpu vm create topology sample. 
(#9611) * Changed package, added information to CODEOWNERS * Added information to CODEOWNERS * Added timeout * Fixed parameters for test * Fixed DeleteTpuVm and naming * Added comment, created Util class * Fixed naming * Fixed whitespace * Split PR into smaller, deleted redundant code * Implemented tpu_vm_create_topology sample, created test * Changed zone * Fixed empty lines and tests, deleted cleanup method * Fixed tests * Fixed test * Fixed imports * Increased timeout to 10 sec * Fixed tests * Fixed tests * Deleted settings * Made ByteArrayOutputStream bout as local variable * Changed timeout to 10 sec --- .../java/tpu/CreateTpuWithTopologyFlag.java | 85 +++++++++++++++++++ tpu/src/main/java/tpu/GetQueuedResource.java | 1 - tpu/src/test/java/tpu/CreateTpuIT.java | 70 --------------- tpu/src/test/java/tpu/QueuedResourceIT.java | 23 +++-- tpu/src/test/java/tpu/TpuVmIT.java | 69 ++++++++++++--- 5 files changed, 155 insertions(+), 93 deletions(-) create mode 100644 tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java delete mode 100644 tpu/src/test/java/tpu/CreateTpuIT.java diff --git a/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java b/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java new file mode 100644 index 00000000000..86e7e28a007 --- /dev/null +++ b/tpu/src/main/java/tpu/CreateTpuWithTopologyFlag.java @@ -0,0 +1,85 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +//[START tpu_vm_create_topology] +import com.google.cloud.tpu.v2.AcceleratorConfig; +import com.google.cloud.tpu.v2.AcceleratorConfig.Type; +import com.google.cloud.tpu.v2.CreateNodeRequest; +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class CreateTpuWithTopologyFlag { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "europe-west4-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + // The version of the Cloud TPU you want to create. + // Available options: TYPE_UNSPECIFIED = 0, V2 = 2, V3 = 4, V4 = 7 + Type tpuVersion = AcceleratorConfig.Type.V2; + // Software version that specifies the version of the TPU runtime to install. + // For more information, see https://cloud.google.com/tpu/docs/runtimes + String tpuSoftwareVersion = "tpu-vm-tf-2.17.0-pod-pjrt"; + // The physical topology of your TPU slice. + // For more information about topology for each TPU version, + // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions. + String topology = "2x2"; + + createTpuWithTopologyFlag(projectId, zone, nodeName, tpuVersion, tpuSoftwareVersion, topology); + } + + // Creates a TPU VM with the specified name, zone, version and topology. 
+ public static Node createTpuWithTopologyFlag(String projectId, String zone, String nodeName, + Type tpuVersion, String tpuSoftwareVersion, String topology) + throws IOException, ExecutionException, InterruptedException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + Node tpuVm = + Node.newBuilder() + .setName(nodeName) + .setAcceleratorConfig(Node.newBuilder() + .getAcceleratorConfigBuilder() + .setType(tpuVersion) + .setTopology(topology) + .build()) + .setRuntimeVersion(tpuSoftwareVersion) + .build(); + + CreateNodeRequest request = + CreateNodeRequest.newBuilder() + .setParent(parent) + .setNodeId(nodeName) + .setNode(tpuVm) + .build(); + + return tpuClient.createNodeAsync(request).get(); + } + } +} +//[END tpu_vm_create_topology] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/GetQueuedResource.java b/tpu/src/main/java/tpu/GetQueuedResource.java index 3a510e045fe..a17c2b41f79 100644 --- a/tpu/src/main/java/tpu/GetQueuedResource.java +++ b/tpu/src/main/java/tpu/GetQueuedResource.java @@ -17,7 +17,6 @@ package tpu; //[START tpu_queued_resources_get] - import com.google.cloud.tpu.v2alpha1.GetQueuedResourceRequest; import com.google.cloud.tpu.v2alpha1.QueuedResource; import com.google.cloud.tpu.v2alpha1.TpuClient; diff --git a/tpu/src/test/java/tpu/CreateTpuIT.java b/tpu/src/test/java/tpu/CreateTpuIT.java deleted file mode 100644 index cdb11831364..00000000000 --- a/tpu/src/test/java/tpu/CreateTpuIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package tpu; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import com.google.api.gax.longrunning.OperationFuture; -import com.google.cloud.tpu.v2.CreateNodeRequest; -import com.google.cloud.tpu.v2.Node; -import com.google.cloud.tpu.v2.TpuClient; -import com.google.cloud.tpu.v2.TpuSettings; -import org.junit.Test; -import org.junit.jupiter.api.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.MockedStatic; - -@RunWith(JUnit4.class) -@Timeout(value = 3) -public class CreateTpuIT { - private static final String PROJECT_ID = "project-id"; - private static final String ZONE = "asia-east1-c"; - private static final String NODE_NAME = "test-tpu"; - private static final String TPU_TYPE = "v2-8"; - private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.12.1"; - - @Test - public void testCreateTpuVm() throws Exception { - try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { - Node mockNode = mock(Node.class); - TpuClient mockTpuClient = mock(TpuClient.class); - OperationFuture mockFuture = mock(OperationFuture.class); - - mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) - .thenReturn(mockTpuClient); - when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class))) - .thenReturn(mockFuture); - 
when(mockFuture.get()).thenReturn(mockNode); - - Node returnedNode = CreateTpuVm.createTpuVm( - PROJECT_ID, ZONE, NODE_NAME, - TPU_TYPE, TPU_SOFTWARE_VERSION); - - verify(mockTpuClient, times(1)) - .createNodeAsync(any(CreateNodeRequest.class)); - verify(mockFuture, times(1)).get(); - assertEquals(returnedNode, mockNode); - } - } -} diff --git a/tpu/src/test/java/tpu/QueuedResourceIT.java b/tpu/src/test/java/tpu/QueuedResourceIT.java index 906d0e270df..ec7d9512b92 100644 --- a/tpu/src/test/java/tpu/QueuedResourceIT.java +++ b/tpu/src/test/java/tpu/QueuedResourceIT.java @@ -35,15 +35,15 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.MockedStatic; @RunWith(JUnit4.class) -@Timeout(value = 3) +@Timeout(value = 10) public class QueuedResourceIT { private static final String PROJECT_ID = "project-id"; private static final String ZONE = "europe-west4-a"; @@ -52,10 +52,10 @@ public class QueuedResourceIT { private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1"; private static final String QUEUED_RESOURCE_NAME = "queued-resource"; private static final String NETWORK_NAME = "default"; - private ByteArrayOutputStream bout; + private static ByteArrayOutputStream bout; - @Before - public void setUp() { + @BeforeAll + public static void setUp() { bout = new ByteArrayOutputStream(); System.setOut(new PrintStream(bout)); } @@ -75,8 +75,8 @@ public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { QueuedResource returnedQueuedResource = CreateQueuedResourceWithNetwork.createQueuedResourceWithNetwork( - PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, - TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME); + PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, 
+ TPU_TYPE, TPU_SOFTWARE_VERSION, NETWORK_NAME); verify(mockTpuClient, times(1)) .createQueuedResourceAsync(any(CreateQueuedResourceRequest.class)); @@ -89,7 +89,6 @@ public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { public void testGetQueuedResource() throws IOException { try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { TpuClient mockClient = mock(TpuClient.class); - GetQueuedResource mockGetQueuedResource = mock(GetQueuedResource.class); QueuedResource mockQueuedResource = mock(QueuedResource.class); mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); @@ -99,14 +98,14 @@ public void testGetQueuedResource() throws IOException { QueuedResource returnedQueuedResource = GetQueuedResource.getQueuedResource(PROJECT_ID, ZONE, NODE_NAME); - verify(mockGetQueuedResource, times(1)) - .getQueuedResource(PROJECT_ID, ZONE, NODE_NAME); + verify(mockClient, times(1)) + .getQueuedResource(any(GetQueuedResourceRequest.class)); assertEquals(returnedQueuedResource, mockQueuedResource); } } @Test - public void testDeleteTpuVm() { + public void testDeleteForceQueuedResource() { try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { TpuClient mockTpuClient = mock(TpuClient.class); OperationFuture mockFuture = mock(OperationFuture.class); diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java index 08dfeca8eb9..a640953c445 100644 --- a/tpu/src/test/java/tpu/TpuVmIT.java +++ b/tpu/src/test/java/tpu/TpuVmIT.java @@ -17,6 +17,7 @@ package tpu; import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -25,6 +26,8 @@ import static org.mockito.Mockito.when; import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.tpu.v2.AcceleratorConfig; +import com.google.cloud.tpu.v2.CreateNodeRequest; import 
com.google.cloud.tpu.v2.DeleteNodeRequest; import com.google.cloud.tpu.v2.GetNodeRequest; import com.google.cloud.tpu.v2.Node; @@ -34,7 +37,6 @@ import java.io.IOException; import java.io.PrintStream; import java.util.concurrent.ExecutionException; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; @@ -42,17 +44,38 @@ import org.mockito.MockedStatic; @RunWith(JUnit4.class) -@Timeout(value = 3) +@Timeout(value = 10) public class TpuVmIT { private static final String PROJECT_ID = "project-id"; private static final String ZONE = "asia-east1-c"; private static final String NODE_NAME = "test-tpu"; - private static ByteArrayOutputStream bout; + private static final String TPU_TYPE = "v2-8"; + private static final AcceleratorConfig.Type ACCELERATOR_TYPE = AcceleratorConfig.Type.V2; + private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1"; + private static final String TOPOLOGY = "2x2"; - @BeforeAll - public static void setUp() { - bout = new ByteArrayOutputStream(); - System.setOut(new PrintStream(bout)); + @Test + public void testCreateTpuVm() throws Exception { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + Node mockNode = mock(Node.class); + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(() -> TpuClient.create(any(TpuSettings.class))) + .thenReturn(mockTpuClient); + when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockNode); + + Node returnedNode = CreateTpuVm.createTpuVm( + PROJECT_ID, ZONE, NODE_NAME, + TPU_TYPE, TPU_SOFTWARE_VERSION); + + verify(mockTpuClient, times(1)) + .createNodeAsync(any(CreateNodeRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedNode, mockNode); + } } @Test @@ -60,21 +83,22 @@ public void testGetTpuVm() throws 
IOException { try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { Node mockNode = mock(Node.class); TpuClient mockClient = mock(TpuClient.class); - GetTpuVm mockGetTpuVm = mock(GetTpuVm.class); mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); when(mockClient.getNode(any(GetNodeRequest.class))).thenReturn(mockNode); Node returnedNode = GetTpuVm.getTpuVm(PROJECT_ID, ZONE, NODE_NAME); - verify(mockGetTpuVm, times(1)) - .getTpuVm(PROJECT_ID, ZONE, NODE_NAME); + verify(mockClient, times(1)) + .getNode(any(GetNodeRequest.class)); assertThat(returnedNode).isEqualTo(mockNode); } } @Test public void testDeleteTpuVm() throws IOException, ExecutionException, InterruptedException { + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + System.setOut(new PrintStream(bout)); try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { TpuClient mockTpuClient = mock(TpuClient.class); OperationFuture mockFuture = mock(OperationFuture.class); @@ -89,6 +113,31 @@ public void testDeleteTpuVm() throws IOException, ExecutionException, Interrupte assertThat(output).contains("TPU VM deleted"); verify(mockTpuClient, times(1)).deleteNodeAsync(any(DeleteNodeRequest.class)); + + bout.close(); + } + } + + @Test + public void testCreateTpuVmWithTopologyFlag() + throws IOException, ExecutionException, InterruptedException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + Node mockNode = mock(Node.class); + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient); + when(mockTpuClient.createNodeAsync(any(CreateNodeRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockNode); + Node returnedNode = CreateTpuWithTopologyFlag.createTpuWithTopologyFlag( + PROJECT_ID, ZONE, NODE_NAME, ACCELERATOR_TYPE, + TPU_SOFTWARE_VERSION, TOPOLOGY); + + verify(mockTpuClient, times(1)) + 
.createNodeAsync(any(CreateNodeRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedNode, mockNode); } } } \ No newline at end of file From d6ae8fc9673fbb6737d21f98041cce37e343610c Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 16:09:09 -0500 Subject: [PATCH 18/66] chore: add basic foundation: logging, testing, cli parsing (#9688) --- bigtable/bigtable-proxy/.gitignore | 38 ++++++ bigtable/bigtable-proxy/pom.xml | 129 ++++++++++++++++++ .../cloud/bigtable/examples/proxy/Main.java | 37 +++++ .../examples/proxy/commands/package-info.java | 18 +++ .../bigtable/examples/proxy/package-info.java | 17 +++ .../src/main/resources/logback.xml | 14 ++ 6 files changed, 253 insertions(+) create mode 100644 bigtable/bigtable-proxy/.gitignore create mode 100644 bigtable/bigtable-proxy/pom.xml create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java create mode 100644 bigtable/bigtable-proxy/src/main/resources/logback.xml diff --git a/bigtable/bigtable-proxy/.gitignore b/bigtable/bigtable-proxy/.gitignore new file mode 100644 index 00000000000..af665abb669 --- /dev/null +++ b/bigtable/bigtable-proxy/.gitignore @@ -0,0 +1,38 @@ +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store 
diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml new file mode 100644 index 00000000000..57d60d56dec --- /dev/null +++ b/bigtable/bigtable-proxy/pom.xml @@ -0,0 +1,129 @@ + + + 4.0.0 + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + com.google.cloud.bigtable + bigtable-proxy + 0.0.1-SNAPSHOT + + + 11 + 11 + UTF-8 + + + 26.50.0 + + 2.0.16 + 1.5.12 + 1.11.0 + 4.7.6 + 4.13.2 + 1.4.4 + + + + + + com.google.cloud + libraries-bom + ${libraries-bom.version} + pom + import + + + + + + + + org.slf4j + slf4j-api + ${slf4j.version} + + + org.slf4j + jul-to-slf4j + ${slf4j.version} + + + ch.qos.logback + logback-classic + ${logback.version} + + + + + com.google.guava + guava + + + + com.google.auto.value + auto-value-annotations + ${auto-value.version} + provided + + + info.picocli + picocli + ${picocli.version} + + + + + junit + junit + ${junit.version} + test + + + com.google.truth + truth + ${truth.version} + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.13.0 + + + + info.picocli + picocli-codegen + ${picocli.version} + + + com.google.auto.value + auto-value + ${auto-value.version} + + + + + -Aproject=${project.groupId}/${project.artifactId} + + + + + + maven-surefire-plugin + 3.5.2 + + + + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java new file mode 100644 index 00000000000..2b3abbfc09e --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.bridge.SLF4JBridgeHandler; +import picocli.CommandLine; +import picocli.CommandLine.Command; + +/** + * Main entry point for proxy commands under {@link + * com.google.cloud.bigtable.examples.proxy.commands}. + */ +@Command(subcommands = {}) +public final class Main { + private static final Logger LOGGER = LoggerFactory.getLogger(Main.class); + + public static void main(String[] args) { + SLF4JBridgeHandler.install(); + new CommandLine(new Main()).execute(args); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java new file mode 100644 index 00000000000..e3b143a9fe9 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Contains all the command implementations for the proxy server. */ +package com.google.cloud.bigtable.examples.proxy.commands; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java new file mode 100644 index 00000000000..6175827d83f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/package-info.java @@ -0,0 +1,17 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy; diff --git a/bigtable/bigtable-proxy/src/main/resources/logback.xml b/bigtable/bigtable-proxy/src/main/resources/logback.xml new file mode 100644 index 00000000000..4b19e5c3773 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + From 5042516e352b01b650898cfd7efc6092dcaa32e6 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 16:33:13 -0500 Subject: [PATCH 19/66] feat: add basic proxy (#9689) stack-info: PR: https://github.com/GoogleCloudPlatform/java-docs-samples/pull/9689, branch: igorbernstein2/stack/2 --- bigtable/bigtable-proxy/pom.xml | 46 ++ .../cloud/bigtable/examples/proxy/Main.java | 3 +- .../examples/proxy/commands/Endpoint.java | 49 ++ .../examples/proxy/commands/Serve.java | 125 +++++ .../examples/proxy/core/ByteMarshaller.java | 40 ++ .../examples/proxy/core/CallProxy.java | 170 +++++++ .../examples/proxy/core/ProxyHandler.java | 47 ++ .../examples/proxy/core/Registry.java | 54 ++ .../examples/proxy/commands/EndpointTest.java | 56 ++ .../proxy/commands/ServeParsingTest.java | 58 +++ .../examples/proxy/commands/ServeTest.java | 480 ++++++++++++++++++ .../examples/proxy/utils/ContextSubject.java | 51 ++ .../examples/proxy/utils/MetadataSubject.java | 57 +++ 13 files changed, 1235 insertions(+), 1 deletion(-) create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ByteMarshaller.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java create mode 100644 
bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml index 57d60d56dec..254d52ec09a 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -44,6 +44,47 @@ + + + io.grpc + grpc-api + + + io.grpc + grpc-core + + + io.grpc + grpc-netty-shaded + + + + + + com.google.api.grpc + grpc-google-cloud-bigtable-v2 + + + com.google.api.grpc + proto-google-cloud-bigtable-v2 + + + com.google.api.grpc + grpc-google-cloud-bigtable-admin-v2 + + + com.google.api.grpc + proto-google-cloud-bigtable-admin-v2 + + + com.google.api.grpc + grpc-google-common-protos + + + com.google.api.grpc + proto-google-common-protos + + org.slf4j @@ -80,6 +121,11 @@ + + io.grpc + grpc-testing + test + junit junit diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java index 2b3abbfc09e..54d48f334d1 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java +++ 
b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy; +import com.google.cloud.bigtable.examples.proxy.commands.Serve; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.bridge.SLF4JBridgeHandler; @@ -26,7 +27,7 @@ * Main entry point for proxy commands under {@link * com.google.cloud.bigtable.examples.proxy.commands}. */ -@Command(subcommands = {}) +@Command(subcommands = {Serve.class}) public final class Main { private static final Logger LOGGER = LoggerFactory.getLogger(Main.class); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java new file mode 100644 index 00000000000..4319cdbfcfe --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Endpoint.java @@ -0,0 +1,49 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import picocli.CommandLine.ITypeConverter; + +@AutoValue +abstract class Endpoint { + abstract String getName(); + + abstract int getPort(); + + @Override + public String toString() { + return String.format("%s:%d", getName(), getPort()); + } + + static Endpoint create(String name, int port) { + return new AutoValue_Endpoint(name, port); + } + + static class ArgConverter implements ITypeConverter { + @Override + public Endpoint convert(String s) throws Exception { + int i = s.lastIndexOf(":"); + Preconditions.checkArgument(i > 0, "endpoint must of the form `name:port`"); + + String name = s.substring(0, i); + int port = Integer.parseInt(s.substring(i + 1)); + return Endpoint.create(name, port); + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java new file mode 100644 index 00000000000..7ce585cdbd0 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -0,0 +1,125 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.bigtable.examples.proxy.commands;

import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc;
import com.google.bigtable.admin.v2.BigtableTableAdminGrpc;
import com.google.bigtable.v2.BigtableGrpc;
import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler;
import com.google.cloud.bigtable.examples.proxy.core.Registry;
import com.google.common.collect.ImmutableMap;
import com.google.longrunning.OperationsGrpc;
import io.grpc.InsecureServerCredentials;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.Server;
import io.grpc.ServerCallHandler;
import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine.Command;
import picocli.CommandLine.Help.Visibility;
import picocli.CommandLine.Option;

/**
 * Starts a local plaintext gRPC server that proxies Bigtable data, admin and
 * long-running-operations traffic to the configured upstream endpoints.
 */
@Command(name = "serve", mixinStandardHelpOptions = true, description = "Start the proxy server")
public class Serve implements Callable<Void> {
  private static final Logger LOGGER = LoggerFactory.getLogger(Serve.class);

  @Option(
      names = "--listen-port",
      required = true,
      description = "Local port to accept connections on")
  int listenPort;

  @Option(names = "--useragent", showDefaultValue = Visibility.ALWAYS)
  String userAgent = "bigtable-java-proxy";

  @Option(
      names = "--bigtable-data-endpoint",
      converter = Endpoint.ArgConverter.class,
      showDefaultValue = Visibility.ALWAYS)
  Endpoint dataEndpoint = Endpoint.create("bigtable.googleapis.com", 443);

  @Option(
      names = "--bigtable-admin-endpoint",
      converter = Endpoint.ArgConverter.class,
      showDefaultValue = Visibility.ALWAYS)
  Endpoint adminEndpoint = Endpoint.create("bigtableadmin.googleapis.com", 443);

  // Package-private and pre-settable so tests can inject in-process channels.
  ManagedChannel adminChannel = null;
  ManagedChannel dataChannel = null;
  Server server;

  @Override
  public Void call() throws Exception {
    start();
    // Block until the server is terminated, then release upstream channels.
    server.awaitTermination();
    cleanup();
    return null;
  }

  /**
   * Builds the upstream channels (unless already injected) and starts the local
   * Netty server bound to {@code localhost:listenPort}.
   *
   * @throws IOException if the server fails to bind
   */
  void start() throws IOException {
    if (dataChannel == null) {
      dataChannel =
          ManagedChannelBuilder.forAddress(dataEndpoint.getName(), dataEndpoint.getPort())
              .userAgent(userAgent)
              // Large limit to accommodate big data responses.
              .maxInboundMessageSize(256 * 1024 * 1024)
              // The proxy relays verbatim; retries are left to the customer's client.
              .disableRetry()
              .keepAliveTime(30, TimeUnit.SECONDS)
              .keepAliveTimeout(10, TimeUnit.SECONDS)
              .build();
    }
    if (adminChannel == null) {
      adminChannel =
          ManagedChannelBuilder.forAddress(adminEndpoint.getName(), adminEndpoint.getPort())
              .userAgent(userAgent)
              .disableRetry()
              .build();
    }

    // Route each known service to the channel that serves it; data traffic gets its
    // own channel, everything else shares the admin channel.
    Map<String, ServerCallHandler<byte[], byte[]>> serviceMap =
        ImmutableMap.of(
            BigtableGrpc.SERVICE_NAME,
            new ProxyHandler<>(dataChannel),
            BigtableInstanceAdminGrpc.SERVICE_NAME,
            new ProxyHandler<>(adminChannel),
            BigtableTableAdminGrpc.SERVICE_NAME,
            new ProxyHandler<>(adminChannel),
            OperationsGrpc.SERVICE_NAME,
            new ProxyHandler<>(adminChannel));

    server =
        NettyServerBuilder.forAddress(
                new InetSocketAddress("localhost", listenPort), InsecureServerCredentials.create())
            .fallbackHandlerRegistry(new Registry(serviceMap))
            .maxInboundMessageSize(256 * 1024 * 1024)
            .build();

    server.start();
    LOGGER.info("Listening on port {}", server.getPort());
  }

  /**
   * Shuts down the upstream channels. Null-safe: the original unconditionally called
   * shutdown() and would NPE if {@link #start()} failed partway (or was never called).
   */
  void cleanup() throws InterruptedException {
    if (dataChannel != null) {
      dataChannel.shutdown();
    }
    if (adminChannel != null) {
      adminChannel.shutdown();
    }
  }
}
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.common.io.ByteStreams; +import io.grpc.MethodDescriptor; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +class ByteMarshaller implements MethodDescriptor.Marshaller { + + @Override + public byte[] parse(InputStream stream) { + try { + return ByteStreams.toByteArray(stream); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + @Override + public InputStream stream(byte[] value) { + return new ByteArrayInputStream(value); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java new file mode 100644 index 00000000000..620d3df45cc --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java @@ -0,0 +1,170 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import io.grpc.ClientCall; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.Status; +import javax.annotation.concurrent.GuardedBy; + +/** A per gppc RPC proxy. */ +class CallProxy { + final RequestProxy serverCallListener; + final ResponseProxy clientCallListener; + + /** + * @param serverCall the incoming server call. This will be triggered a customer client. + * @param clientCall the outgoing call to Bigtable service. This will be created by {@link + * ProxyHandler} + */ + public CallProxy(ServerCall serverCall, ClientCall clientCall) { + // Listen for incoming request messages and send them to the upstream ClientCall + // The RequestProxy will respect back pressure from the ClientCall and only request a new + // message from the incoming rpc when the upstream client call is ready, + serverCallListener = new RequestProxy(clientCall); + + // Listen from response messages from the upstream ClientCall and relay them to the customer's + // client. This will respect backpressure and request new messages from the upstream when the + // customer's client is ready. + clientCallListener = new ResponseProxy(serverCall); + } + + /** + * Back pressure aware message pump of request messages from a customer's downstream client to + * upstream Bigtable service. + * + *

Additional messages are requested from the downstream while the upstream's isReady() flag is + * set. As soon as the upstream signals that is full by returning false for isReady(). {@link + * RequestProxy} will remember that the need to get more messages from downstream and then wait + * until the upstream signals readiness via onClientReady(). + * + *

Please note in the current Bigtable protocol, all RPCs a client unary. Until that changes, + * this proxy will only have a single iteration. However, its designed generically to support + * future usecases. + */ + private class RequestProxy extends ServerCall.Listener { + + private final ClientCall clientCall; + + @GuardedBy("this") + private boolean needToRequest; + + public RequestProxy(ClientCall clientCall) { + this.clientCall = clientCall; + } + + @Override + public void onCancel() { + clientCall.cancel("Server cancelled", null); + } + + @Override + public void onHalfClose() { + clientCall.halfClose(); + } + + @Override + public void onMessage(ReqT message) { + clientCall.sendMessage(message); + synchronized (this) { + if (clientCall.isReady()) { + clientCallListener.serverCall.request(1); + } else { + // The outgoing call is not ready for more requests. Stop requesting additional data and + // wait for it to catch up. + needToRequest = true; + } + } + } + + @Override + public void onReady() { + clientCallListener.onServerReady(); + } + + // Called from ResponseProxy, which is a different thread than the ServerCall.Listener + // callbacks. + synchronized void onClientReady() { + if (needToRequest) { + // When the upstream client is ready for another request message from the customer's client, + // ask for one more message. + clientCallListener.serverCall.request(1); + needToRequest = false; + } + } + } + + /** + * Back pressure aware message pump of response messages from upstream Bigtable service to a + * customer's downstream client. + * + *

Additional messages are requested from the upstream while the downstream's isReady() flag is + * set. As soon as the downstream signals that is full by returning false for isReady(). {@link + * ResponseProxy} will remember that the need to get more messages from upstream and then wait + * until the downstream signals readiness via onServerReady(). + */ + private class ResponseProxy extends ClientCall.Listener { + + private final ServerCall serverCall; + + @GuardedBy("this") + private boolean needToRequest; + + public ResponseProxy(ServerCall serverCall) { + this.serverCall = serverCall; + } + + @Override + public void onClose(Status status, Metadata trailers) { + serverCall.close(status, trailers); + } + + @Override + public void onHeaders(Metadata headers) { + serverCall.sendHeaders(headers); + } + + @Override + public void onMessage(RespT message) { + serverCall.sendMessage(message); + synchronized (this) { + if (serverCall.isReady()) { + serverCallListener.clientCall.request(1); + } else { + // The incoming call is not ready for more responses. Stop requesting additional data + // and wait for it to catch up. + needToRequest = true; + } + } + } + + @Override + public void onReady() { + serverCallListener.onClientReady(); + } + + // Called from RequestProxy, which is a different thread than the ClientCall.Listener + // callbacks. 
+ synchronized void onServerReady() { + if (needToRequest) { + serverCallListener.clientCall.request(1); + needToRequest = false; + } + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java new file mode 100644 index 00000000000..38554d86ebd --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java @@ -0,0 +1,47 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; + +/** A factory pairing of an incoming server call to an outgoing client call. 
*/ +public final class ProxyHandler implements ServerCallHandler { + private final Channel channel; + + public ProxyHandler(Channel channel) { + this.channel = channel; + } + + @Override + public ServerCall.Listener startCall(ServerCall serverCall, Metadata headers) { + CallOptions callOptions = CallOptions.DEFAULT; + + ClientCall clientCall = + channel.newCall(serverCall.getMethodDescriptor(), callOptions); + + CallProxy proxy = new CallProxy<>(serverCall, clientCall); + clientCall.start(proxy.clientCallListener, headers); + serverCall.request(1); + clientCall.request(1); + return proxy.serverCallListener; + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java new file mode 100644 index 00000000000..bed62c292e0 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/Registry.java @@ -0,0 +1,54 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.core; + +import com.google.common.collect.ImmutableMap; +import io.grpc.HandlerRegistry; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCallHandler; +import io.grpc.ServerMethodDefinition; +import java.util.Map; + +/** + * Contains the service name -> handler mapping. This acts as an aggregate service. + * + *

The handlers treat requests and responses as raw byte arrays. + */ +public class Registry extends HandlerRegistry { + private final MethodDescriptor.Marshaller byteMarshaller = new ByteMarshaller(); + private final Map> serviceMap; + + public Registry(Map> serviceMap) { + this.serviceMap = ImmutableMap.copyOf(serviceMap); + } + + @Override + public ServerMethodDefinition lookupMethod(String methodName, String authority) { + MethodDescriptor methodDescriptor = + MethodDescriptor.newBuilder(byteMarshaller, byteMarshaller) + .setFullMethodName(methodName) + .setType(MethodDescriptor.MethodType.UNKNOWN) + .build(); + + ServerCallHandler handler = serviceMap.get(methodDescriptor.getServiceName()); + if (handler == null) { + return null; + } + + return ServerMethodDefinition.create(methodDescriptor, handler); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java new file mode 100644 index 00000000000..999b081a246 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/EndpointTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.bigtable.examples.proxy.commands.Endpoint.ArgConverter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class EndpointTest { + @Test + public void testOk() throws Exception { + ArgConverter argConverter = new ArgConverter(); + Endpoint result = argConverter.convert("some-endpoint:1234"); + assertThat(result).isEqualTo(Endpoint.create("some-endpoint", 1234)); + } + + @Test + public void testMissingPort() throws Exception { + ArgConverter argConverter = new ArgConverter(); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert("some-endpoint:")); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert("some-endpoint")); + } + + @Test + public void testMissingName() throws Exception { + ArgConverter argConverter = new ArgConverter(); + assertThrows(IllegalArgumentException.class, () -> argConverter.convert(":1234")); + } + + @Test + public void testIpv6() throws Exception { + ArgConverter argConverter = new ArgConverter(); + Endpoint result = argConverter.convert("[2561:1900:4545:0003:0200:F8FF:FE21:67CF]:1234"); + assertThat(result) + .isEqualTo(Endpoint.create("[2561:1900:4545:0003:0200:F8FF:FE21:67CF]", 1234)); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java new file mode 100644 index 00000000000..128544c88f6 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import picocli.CommandLine; + +@RunWith(JUnit4.class) +public class ServeParsingTest { + @Test + public void testMinimalArgs() { + Serve serve = new Serve(); + new CommandLine(serve).parseArgs("--listen-port=1234"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.userAgent).isEqualTo("bigtable-java-proxy"); + assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("bigtable.googleapis.com", 443)); + assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("bigtableadmin.googleapis.com", 443)); + } + + @Test + public void testDataEndpointOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs("--listen-port=1234", "--bigtable-data-endpoint=example.com:1234"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); + } + + @Test + public void testAdminDataEndpointOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs("--listen-port=1234", "--bigtable-admin-endpoint=example.com:1234"); + + assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); + } +} diff --git 
a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java new file mode 100644 index 00000000000..912d8f5eeac --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java @@ -0,0 +1,480 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static com.google.cloud.bigtable.examples.proxy.utils.ContextSubject.assertThat; +import static com.google.cloud.bigtable.examples.proxy.utils.MetadataSubject.assertThat; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminFutureStub; +import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminImplBase; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc.BigtableTableAdminFutureStub; +import com.google.bigtable.admin.v2.BigtableTableAdminGrpc.BigtableTableAdminImplBase; +import com.google.bigtable.admin.v2.GetInstanceRequest; +import com.google.bigtable.admin.v2.GetTableRequest; +import com.google.bigtable.admin.v2.Instance; +import com.google.bigtable.admin.v2.Table; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableFutureStub; +import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.common.collect.Range; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.longrunning.GetOperationRequest; +import com.google.longrunning.Operation; +import com.google.longrunning.OperationsGrpc; +import com.google.longrunning.OperationsGrpc.OperationsFutureStub; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Context; +import io.grpc.Deadline; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import 
io.grpc.ForwardingServerCall.SimpleForwardingServerCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.inprocess.InProcessChannelBuilder; +import io.grpc.inprocess.InProcessServerBuilder; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcCleanupRule; +import java.io.IOException; +import java.net.ServerSocket; +import java.time.Duration; +import java.util.UUID; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ServeTest { + private final String targetServerName = UUID.randomUUID().toString(); + + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule().setTimeout(1, TimeUnit.MINUTES); + + // Fake targets + private CallContextInterceptor callContextInterceptor; + private MetadataInterceptor metadataInterceptor; + private FakeDataService dataService; + private FakeInstanceAdminService instanceAdminService; + private FakeTableAdminService tableAdminService; + private OperationService operationService; + private ManagedChannel fakeServiceChannel; + + // Proxy + private Serve serve; + private ManagedChannel proxyChannel; + + @Before + public void setUp() throws IOException { + // Create the fake 
target + callContextInterceptor = new CallContextInterceptor(); + metadataInterceptor = new MetadataInterceptor(); + dataService = new FakeDataService(); + instanceAdminService = new FakeInstanceAdminService(); + tableAdminService = new FakeTableAdminService(); + operationService = new OperationService(); + + grpcCleanup.register( + InProcessServerBuilder.forName(targetServerName) + .intercept(callContextInterceptor) + .intercept(metadataInterceptor) + .addService(dataService) + .addService(instanceAdminService) + .addService(tableAdminService) + .addService(operationService) + .build() + .start()); + + fakeServiceChannel = + grpcCleanup.register( + InProcessChannelBuilder.forName(targetServerName).usePlaintext().build()); + + // Create the proxy + serve = createAndStartCommand(fakeServiceChannel); + + proxyChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", serve.listenPort).usePlaintext().build()); + } + + @After + public void tearDown() throws InterruptedException { + if (serve != null) { + serve.cleanup(); + } + } + + @Test + public void testDataRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = BigtableGrpc.newFutureStub(proxyChannel); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + CheckAndMutateRowResponse expectedResponse = + CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + CheckAndMutateRowResponse r = proxyFuture.get(1, TimeUnit.SECONDS); + 
assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testInstanceRpcOk() + throws InterruptedException, ExecutionException, TimeoutException { + BigtableInstanceAdminFutureStub proxyStub = + BigtableInstanceAdminGrpc.newFutureStub(proxyChannel); + + GetInstanceRequest request = GetInstanceRequest.newBuilder().setName("some-instance").build(); + final ListenableFuture proxyFuture = proxyStub.getInstance(request); + StreamObserver serverObserver = + instanceAdminService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + Instance expectedResponse = Instance.newBuilder().setName("some-instance").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Instance r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testTableRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + BigtableTableAdminFutureStub proxyStub = BigtableTableAdminGrpc.newFutureStub(proxyChannel); + + GetTableRequest request = GetTableRequest.newBuilder().setName("some-table").build(); + final ListenableFuture proxyFuture = proxyStub.getTable(request); + StreamObserver
serverObserver = + tableAdminService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + Table expectedResponse = Table.newBuilder().setName("some-table").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Table r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testOpRpcOk() throws InterruptedException, ExecutionException, TimeoutException { + OperationsFutureStub proxyStub = OperationsGrpc.newFutureStub(proxyChannel); + + GetOperationRequest request = GetOperationRequest.newBuilder().setName("some-table").build(); + final ListenableFuture proxyFuture = proxyStub.getOperation(request); + StreamObserver serverObserver = + operationService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + if (proxyFuture.isDone()) { + proxyFuture.get(); + } + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + Operation expectedResponse = Operation.newBuilder().setName("some-table").build(); + + serverObserver.onNext(expectedResponse); + serverObserver.onCompleted(); + + Operation r = proxyFuture.get(1, TimeUnit.SECONDS); + assertThat(r).isEqualTo(expectedResponse); + } + + @Test + public void testMetadataProxy() + throws InterruptedException, ExecutionException, TimeoutException { + Metadata responseMetadata = new Metadata(); + responseMetadata.put(Key.of("resp-header", Metadata.ASCII_STRING_MARSHALLER), "resp-value"); + metadataInterceptor.responseHeaders = () -> responseMetadata; + + Metadata trailers = new Metadata(); + trailers.put(Key.of("trailer", Metadata.ASCII_STRING_MARSHALLER), "trailer-value"); + metadataInterceptor.responseTrailers = () -> trailers; + + 
AtomicReference clientRecvHeader = new AtomicReference<>(); + AtomicReference clientRecvTrailer = new AtomicReference<>(); + + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall<>( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + headers.put( + Key.of("client-sent-header", Metadata.ASCII_STRING_MARSHALLER), + "client-sent-header-value"); + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onHeaders(Metadata headers) { + clientRecvHeader.set(headers); + super.onHeaders(headers); + } + + @Override + public void onClose(Status status, Metadata trailers) { + clientRecvTrailer.set(trailers); + super.onClose(status, trailers); + } + }, + headers); + } + }; + } + }); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + + proxyFuture.get(1, TimeUnit.SECONDS); + + assertThat(metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS)) + .hasValue("client-sent-header", "client-sent-header-value"); + + assertThat(clientRecvHeader.get()).hasValue("resp-header", "resp-value"); + assertThat(clientRecvTrailer.get()).hasValue("trailer", "trailer-value"); + } + + 
@Test + public void testDeadlinePropagation() + throws InterruptedException, ExecutionException, TimeoutException { + + Deadline originalDeadline = Deadline.after(10, TimeUnit.MINUTES); + + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel).withDeadline(originalDeadline); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + + proxyFuture.get(1, TimeUnit.SECONDS); + + Context serverContext = callContextInterceptor.contexts.poll(1, TimeUnit.SECONDS); + assertThat(serverContext) + .hasRemainingDeadlineThat() + .isIn(Range.closed(Duration.ofMinutes(9), Duration.ofMinutes(10))); + } + + private static Serve createAndStartCommand(ManagedChannel targetChannel) throws IOException { + for (int i = 10; i >= 0; i--) { + Serve s = new Serve(); + s.dataChannel = targetChannel; + s.adminChannel = targetChannel; + + try (ServerSocket serverSocket = new ServerSocket(0)) { + s.listenPort = serverSocket.getLocalPort(); + } + + try { + s.start(); + return s; + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + static class CallContextInterceptor implements ServerInterceptor { + BlockingQueue contexts = new LinkedBlockingDeque<>(); + + @Override + public Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler 
next) { + + contexts.add(Context.current()); + return next.startCall(call, headers); + } + } + + static class MetadataInterceptor implements ServerInterceptor { + private BlockingQueue requestHeaders = new LinkedBlockingDeque<>(); + volatile Supplier responseHeaders = Metadata::new; + volatile Supplier responseTrailers = Metadata::new; + + @Override + public Listener interceptCall( + ServerCall call, Metadata metadata, ServerCallHandler next) { + requestHeaders.add(metadata); + return next.startCall( + new SimpleForwardingServerCall(call) { + @Override + public void sendHeaders(Metadata headers) { + headers.merge(responseHeaders.get()); + super.sendHeaders(headers); + } + + @Override + public void close(Status status, Metadata trailers) { + trailers.merge(responseTrailers.get()); + super.close(status, trailers); + } + }, + metadata); + } + } + + private static class FakeDataService extends BigtableImplBase { + private final ConcurrentHashMap< + CheckAndMutateRowRequest, BlockingDeque>> + calls = new ConcurrentHashMap<>(); + + @Override + public void checkAndMutateRow( + CheckAndMutateRowRequest request, + StreamObserver responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } + + private static class FakeInstanceAdminService extends BigtableInstanceAdminImplBase { + private final ConcurrentHashMap>> + calls = new ConcurrentHashMap<>(); + + @Override + public void getInstance(GetInstanceRequest request, StreamObserver responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } + + private static class FakeTableAdminService extends BigtableTableAdminImplBase { + private final ConcurrentHashMap>> calls = + new ConcurrentHashMap<>(); + + @Override + public void getTable(GetTableRequest request, StreamObserver
responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } + + private static class OperationService extends OperationsGrpc.OperationsImplBase { + private final ConcurrentHashMap>> + calls = new ConcurrentHashMap<>(); + + @Override + public void getOperation( + GetOperationRequest request, StreamObserver responseObserver) { + calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .add(responseObserver); + } + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java new file mode 100644 index 00000000000..0babab53c6c --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/ContextSubject.java @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.utils; + +import static com.google.common.truth.Truth.assertAbout; + +import com.google.common.truth.ComparableSubject; +import com.google.common.truth.FailureMetadata; +import com.google.common.truth.Subject; +import io.grpc.Context; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import org.jspecify.annotations.Nullable; + +public class ContextSubject extends Subject { + private final Context context; + + public ContextSubject(FailureMetadata metadata, @Nullable Context actual) { + super(metadata, actual); + this.context = actual; + } + + public static Factory context() { + return ContextSubject::new; + } + + public static ContextSubject assertThat(Context context) { + return assertAbout(context()).that(context); + } + + public ComparableSubject hasRemainingDeadlineThat() { + Duration remaining = + Duration.ofMillis(context.getDeadline().timeRemaining(TimeUnit.MILLISECONDS)); + + return check("getDeadline().timeRemaining()").that(remaining); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java new file mode 100644 index 00000000000..591164d13d3 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java @@ -0,0 +1,57 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.utils; + +import static com.google.common.truth.Truth.assertAbout; + +import com.google.common.truth.FailureMetadata; +import com.google.common.truth.Subject; +import io.grpc.Metadata; +import org.jspecify.annotations.Nullable; + +public class MetadataSubject extends Subject { + private final Metadata metadata; + + public MetadataSubject(FailureMetadata metadata, @Nullable Metadata actual) { + super(metadata, actual); + this.metadata = actual; + } + + public static Factory metadata() { + return MetadataSubject::new; + } + + public static MetadataSubject assertThat(Metadata metadata) { + return assertAbout(metadata()).that(metadata); + } + + public void hasKey(String key) { + hasKey(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)); + } + + public void hasKey(Metadata.Key key) { + check("keys()").that(metadata.keys()).contains(key); + } + + public void hasValue(String key, String value) { + hasValue(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value); + } + + public void hasValue(Metadata.Key key, T value) { + check("get(" + key + ")").that(metadata.get(key)).isEqualTo(value); + } +} From f97aaff9533c35039e719f17f83aff02b925486a Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 16:44:01 -0500 Subject: [PATCH 20/66] feat: move credential handling responsibility to the proxy (#9690) stack-info: PR: https://github.com/GoogleCloudPlatform/java-docs-samples/pull/9690, branch: igorbernstein2/stack/3 --- bigtable/bigtable-proxy/pom.xml | 8 ++ .../examples/proxy/commands/Serve.java | 17 ++- .../examples/proxy/core/ProxyHandler.java | 13 +- .../examples/proxy/commands/ServeTest.java | 119 +++++++++++++++++- .../examples/proxy/utils/MetadataSubject.java | 15 ++- 5 files changed, 163 insertions(+), 9 deletions(-) diff --git a/bigtable/bigtable-proxy/pom.xml 
b/bigtable/bigtable-proxy/pom.xml index 254d52ec09a..abd7e9e508a 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -57,6 +57,14 @@ io.grpc grpc-netty-shaded + + io.grpc + grpc-auth + + + com.google.auth + google-auth-library-oauth2-http + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index 7ce585cdbd0..b15301b4b49 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -16,6 +16,8 @@ package com.google.cloud.bigtable.examples.proxy.commands; +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; import com.google.bigtable.admin.v2.BigtableTableAdminGrpc; import com.google.bigtable.v2.BigtableGrpc; @@ -23,11 +25,13 @@ import com.google.cloud.bigtable.examples.proxy.core.Registry; import com.google.common.collect.ImmutableMap; import com.google.longrunning.OperationsGrpc; +import io.grpc.CallCredentials; import io.grpc.InsecureServerCredentials; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.Server; import io.grpc.ServerCallHandler; +import io.grpc.auth.MoreCallCredentials; import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; import java.io.IOException; import java.net.InetSocketAddress; @@ -67,6 +71,7 @@ public class Serve implements Callable { ManagedChannel adminChannel = null; ManagedChannel dataChannel = null; + Credentials credentials = null; Server server; @Override @@ -95,17 +100,21 @@ void start() throws IOException { .disableRetry() .build(); } + if (credentials == null) { + credentials = GoogleCredentials.getApplicationDefault(); + } + CallCredentials 
callCredentials = MoreCallCredentials.from(credentials); Map> serviceMap = ImmutableMap.of( BigtableGrpc.SERVICE_NAME, - new ProxyHandler<>(dataChannel), + new ProxyHandler<>(dataChannel, callCredentials), BigtableInstanceAdminGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel), + new ProxyHandler<>(adminChannel, callCredentials), BigtableTableAdminGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel), + new ProxyHandler<>(adminChannel, callCredentials), OperationsGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel)); + new ProxyHandler<>(adminChannel, callCredentials)); server = NettyServerBuilder.forAddress( diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java index 38554d86ebd..5a136262018 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy.core; +import io.grpc.CallCredentials; import io.grpc.CallOptions; import io.grpc.Channel; import io.grpc.ClientCall; @@ -25,15 +26,23 @@ /** A factory pairing of an incoming server call to an outgoing client call. 
*/ public final class ProxyHandler implements ServerCallHandler { + private static final Metadata.Key AUTHORIZATION_KEY = + Metadata.Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER); + private final Channel channel; + private final CallCredentials callCredentials; - public ProxyHandler(Channel channel) { + public ProxyHandler(Channel channel, CallCredentials callCredentials) { this.channel = channel; + this.callCredentials = callCredentials; } @Override public ServerCall.Listener startCall(ServerCall serverCall, Metadata headers) { - CallOptions callOptions = CallOptions.DEFAULT; + // Strip incoming credentials + headers.removeAll(AUTHORIZATION_KEY); + // Inject proxy credentials + CallOptions callOptions = CallOptions.DEFAULT.withCallCredentials(callCredentials); ClientCall clientCall = channel.newCall(serverCall.getMethodDescriptor(), callOptions); diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java index 912d8f5eeac..c55cc2bcec4 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java @@ -21,6 +21,7 @@ import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; +import com.google.auth.Credentials; import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminFutureStub; import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc.BigtableInstanceAdminImplBase; @@ -36,6 +37,7 @@ import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import 
com.google.common.collect.Lists; import com.google.common.collect.Range; import com.google.common.util.concurrent.ListenableFuture; import com.google.longrunning.GetOperationRequest; @@ -67,7 +69,10 @@ import io.grpc.testing.GrpcCleanupRule; import java.io.IOException; import java.net.ServerSocket; +import java.net.URI; import java.time.Duration; +import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.BlockingDeque; import java.util.concurrent.BlockingQueue; @@ -100,6 +105,7 @@ public class ServeTest { private FakeTableAdminService tableAdminService; private OperationService operationService; private ManagedChannel fakeServiceChannel; + private FakeCredentials fakeCredentials; // Proxy private Serve serve; @@ -115,6 +121,8 @@ public void setUp() throws IOException { tableAdminService = new FakeTableAdminService(); operationService = new OperationService(); + fakeCredentials = new FakeCredentials(); + grpcCleanup.register( InProcessServerBuilder.forName(targetServerName) .intercept(callContextInterceptor) @@ -131,7 +139,9 @@ public void setUp() throws IOException { InProcessChannelBuilder.forName(targetServerName).usePlaintext().build()); // Create the proxy - serve = createAndStartCommand(fakeServiceChannel); + // Inject fakes for upstream calls. For unit tests we want to shim communications to the + // bigtable service. 
+ serve = createAndStartCommand(fakeServiceChannel, fakeCredentials); proxyChannel = grpcCleanup.register( @@ -363,11 +373,86 @@ public void testDeadlinePropagation() .isIn(Range.closed(Duration.ofMinutes(9), Duration.ofMinutes(10))); } - private static Serve createAndStartCommand(ManagedChannel targetChannel) throws IOException { + @Test + public void testCredentials() throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = BigtableGrpc.newFutureStub(proxyChannel); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + proxyFuture.get(1, TimeUnit.SECONDS); + + assertThat(metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS)) + .hasValue("authorization", "fake-token"); + } + + @Test + public void testCredentialsClobber() + throws InterruptedException, ExecutionException, TimeoutException { + BigtableFutureStub proxyStub = + BigtableGrpc.newFutureStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + headers.put( + Metadata.Key.of("authorization", Metadata.ASCII_STRING_MARSHALLER), + "pre-proxied-value"); + super.start(responseListener, headers); + } + }; + } 
+ }); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder().setTableName("some-table").build(); + final ListenableFuture proxyFuture = + proxyStub.checkAndMutateRow(request); + StreamObserver serverObserver = + dataService + .calls + .computeIfAbsent(request, (ignored) -> new LinkedBlockingDeque<>()) + .poll(1, TimeUnit.SECONDS); + + assertWithMessage("Timed out waiting for the proxied RPC on the fake server") + .that(serverObserver) + .isNotNull(); + + serverObserver.onNext(CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + serverObserver.onCompleted(); + proxyFuture.get(1, TimeUnit.SECONDS); + + Metadata serverRequestHeaders = metadataInterceptor.requestHeaders.poll(1, TimeUnit.SECONDS); + assertThat(serverRequestHeaders).hasValue("authorization", "fake-token"); + } + + private static Serve createAndStartCommand( + ManagedChannel targetChannel, FakeCredentials targetCredentials) throws IOException { for (int i = 10; i >= 0; i--) { Serve s = new Serve(); s.dataChannel = targetChannel; s.adminChannel = targetChannel; + s.credentials = targetCredentials; try (ServerSocket serverSocket = new ServerSocket(0)) { s.listenPort = serverSocket.getLocalPort(); @@ -477,4 +562,34 @@ public void getOperation( .add(responseObserver); } } + + private static class FakeCredentials extends Credentials { + private static final String HEADER_NAME = "authorization"; + private String fakeValue = "fake-token"; + + @Override + public String getAuthenticationType() { + return "fake"; + } + + @Override + public Map> getRequestMetadata(URI uri) throws IOException { + return Map.of(HEADER_NAME, Lists.newArrayList(fakeValue)); + } + + @Override + public boolean hasRequestMetadata() { + return true; + } + + @Override + public boolean hasRequestMetadataOnly() { + return true; + } + + @Override + public void refresh() throws IOException { + // noop + } + } } diff --git 
a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java index 591164d13d3..4494c52dc94 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/utils/MetadataSubject.java @@ -21,6 +21,8 @@ import com.google.common.truth.FailureMetadata; import com.google.common.truth.Subject; import io.grpc.Metadata; +import java.util.ArrayList; +import java.util.Optional; import org.jspecify.annotations.Nullable; public class MetadataSubject extends Subject { @@ -52,6 +54,17 @@ public void hasValue(String key, String value) { } public void hasValue(Metadata.Key key, T value) { - check("get(" + key + ")").that(metadata.get(key)).isEqualTo(value); + Iterable actualValues = Optional.ofNullable(metadata.getAll(key)).orElse(new ArrayList<>()); + check("get(" + key + ")").that(actualValues).containsExactly(value); + } + + public void containsValue(String key, String value) { + check("get(" + key + ")") + .that(metadata.getAll(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER))) + .contains(value); + } + + public void containsValue(Metadata.Key key, T value) { + check("get(" + key + ")").that(metadata.getAll(key)).contains(value); } } From 44fd58c7b9657971ac34862749cbfa66b13cfe9b Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 16:51:32 -0500 Subject: [PATCH 21/66] feat: add metrics (#9691) stack-info: PR: https://github.com/GoogleCloudPlatform/java-docs-samples/pull/9691, branch: igorbernstein2/stack/4 --- bigtable/bigtable-proxy/pom.xml | 39 ++ .../examples/proxy/commands/Serve.java | 25 +- .../examples/proxy/core/CallProxy.java | 10 +- .../examples/proxy/core/ProxyHandler.java | 18 +- .../examples/proxy/metrics/CallLabels.java | 168 +++++++ 
.../metrics/InstrumentedCallCredentials.java | 70 +++ .../examples/proxy/metrics/Metrics.java | 39 ++ .../examples/proxy/metrics/MetricsImpl.java | 230 +++++++++ .../examples/proxy/metrics/NoopMetrics.java | 47 ++ .../examples/proxy/metrics/Tracer.java | 110 +++++ .../proxy/commands/ServeMetricsTest.java | 443 ++++++++++++++++++ .../proxy/commands/ServeParsingTest.java | 21 +- .../examples/proxy/commands/ServeTest.java | 2 + .../proxy/metrics/CallLabelsTest.java | 157 +++++++ 14 files changed, 1366 insertions(+), 13 deletions(-) create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml index abd7e9e508a..db972f2c19d 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -23,6 +23,8 @@ 26.50.0 + 1.44.1 + 0.33.0 2.0.16 1.5.12 1.11.0 @@ -40,6 +42,20 @@ pom import + + io.opentelemetry + opentelemetry-bom + ${otel.version} + pom + import + + + org.mockito + mockito-bom + 5.14.2 + pom + import + @@ -93,6 +109,23 @@ 
proto-google-common-protos + + + io.opentelemetry + opentelemetry-sdk + + + + io.opentelemetry + opentelemetry-sdk-metrics + + + + com.google.cloud.opentelemetry + exporter-metrics + ${exporter-metrics.version} + + org.slf4j @@ -146,6 +179,12 @@ ${truth.version} test + + org.mockito + mockito-core + + test + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index b15301b4b49..5c62550d28c 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -23,6 +23,9 @@ import com.google.bigtable.v2.BigtableGrpc; import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler; import com.google.cloud.bigtable.examples.proxy.core.Registry; +import com.google.cloud.bigtable.examples.proxy.metrics.InstrumentedCallCredentials; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; import com.google.common.collect.ImmutableMap; import com.google.longrunning.OperationsGrpc; import io.grpc.CallCredentials; @@ -69,10 +72,17 @@ public class Serve implements Callable { showDefaultValue = Visibility.ALWAYS) Endpoint adminEndpoint = Endpoint.create("bigtableadmin.googleapis.com", 443); + @Option( + names = "--metrics-project-id", + required = true, + description = "The project id where metrics should be exported") + String metricsProjectId = null; + ManagedChannel adminChannel = null; ManagedChannel dataChannel = null; Credentials credentials = null; Server server; + Metrics metrics; @Override public Void call() throws Exception { @@ -103,18 +113,23 @@ void start() throws IOException { if (credentials == null) { credentials = GoogleCredentials.getApplicationDefault(); } - CallCredentials 
callCredentials = MoreCallCredentials.from(credentials); + CallCredentials callCredentials = + new InstrumentedCallCredentials(MoreCallCredentials.from(credentials)); + + if (metrics == null) { + metrics = new MetricsImpl(credentials, metricsProjectId); + } Map> serviceMap = ImmutableMap.of( BigtableGrpc.SERVICE_NAME, - new ProxyHandler<>(dataChannel, callCredentials), + new ProxyHandler<>(metrics, dataChannel, callCredentials), BigtableInstanceAdminGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel, callCredentials), + new ProxyHandler<>(metrics, adminChannel, callCredentials), BigtableTableAdminGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel, callCredentials), + new ProxyHandler<>(metrics, adminChannel, callCredentials), OperationsGrpc.SERVICE_NAME, - new ProxyHandler<>(adminChannel, callCredentials)); + new ProxyHandler<>(metrics, adminChannel, callCredentials)); server = NettyServerBuilder.forAddress( diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java index 620d3df45cc..8e4c97db0b7 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy.core; +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import io.grpc.ClientCall; import io.grpc.Metadata; import io.grpc.ServerCall; @@ -24,15 +25,20 @@ /** A per gppc RPC proxy. */ class CallProxy { + + private final Tracer tracer; final RequestProxy serverCallListener; final ResponseProxy clientCallListener; /** + * @param tracer a lifecycle observer to publish metrics. * @param serverCall the incoming server call. This will be triggered a customer client. * @param clientCall the outgoing call to Bigtable service. 
This will be created by {@link * ProxyHandler} */ - public CallProxy(ServerCall serverCall, ClientCall clientCall) { + public CallProxy( + Tracer tracer, ServerCall serverCall, ClientCall clientCall) { + this.tracer = tracer; // Listen for incoming request messages and send them to the upstream ClientCall // The RequestProxy will respect back pressure from the ClientCall and only request a new // message from the incoming rpc when the upstream client call is ready, @@ -131,6 +137,8 @@ public ResponseProxy(ServerCall serverCall) { @Override public void onClose(Status status, Metadata trailers) { + tracer.onCallFinished(status); + serverCall.close(status, trailers); } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java index 5a136262018..421c664325e 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java @@ -16,6 +16,9 @@ package com.google.cloud.bigtable.examples.proxy.core; +import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import io.grpc.CallCredentials; import io.grpc.CallOptions; import io.grpc.Channel; @@ -29,25 +32,32 @@ public final class ProxyHandler implements ServerCallHandler AUTHORIZATION_KEY = Metadata.Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER); + private final Metrics metrics; private final Channel channel; private final CallCredentials callCredentials; - public ProxyHandler(Channel channel, CallCredentials callCredentials) { + public ProxyHandler(Metrics metrics, Channel channel, CallCredentials callCredentials) { + this.metrics = metrics; this.channel = channel; 
this.callCredentials = callCredentials; } @Override public ServerCall.Listener startCall(ServerCall serverCall, Metadata headers) { - // Strip incoming credentials - headers.removeAll(AUTHORIZATION_KEY); + CallLabels callLabels = CallLabels.create(serverCall.getMethodDescriptor(), headers); + Tracer tracer = new Tracer(metrics, callLabels); + // Inject proxy credentials CallOptions callOptions = CallOptions.DEFAULT.withCallCredentials(callCredentials); + callOptions = tracer.injectIntoCallOptions(callOptions); + + // Strip incoming credentials + headers.removeAll(AUTHORIZATION_KEY); ClientCall clientCall = channel.newCall(serverCall.getMethodDescriptor(), callOptions); - CallProxy proxy = new CallProxy<>(serverCall, clientCall); + CallProxy proxy = new CallProxy<>(tracer, serverCall, clientCall); clientCall.start(proxy.clientCallListener, headers); serverCall.request(1); clientCall.request(1); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java new file mode 100644 index 00000000000..b7a30825d76 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java @@ -0,0 +1,168 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.auto.value.AutoValue; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.opentelemetry.api.common.Attributes; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.Optional; + +/** + * A value class to encapsulate call identity. + * + *

This class extracts relevant information from request headers and makes it accessible to + * metrics &amp; the upstream client. The primary headers consulted are:

+ * + *
+ * <ul>
+ *   <li>{@code x-goog-request-params} - contains the resource and app profile id</li>
+ *   <li>{@code x-goog-api-client} - contains the client info of the downstream client</li>
+ * </ul>
+ */ + +@AutoValue +public abstract class CallLabels { + private static final Key<String> REQUEST_PARAMS = + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + private static final Key<String> API_CLIENT = + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); + + enum ResourceNameType { + Parent("parent", 0), + Name("name", 1), + TableName("table_name", 2); + + private final String name; + private final int priority; + + ResourceNameType(String name, int priority) { + this.name = name; + this.priority = priority; + } + } + + @AutoValue + abstract static class ResourceName { + + abstract ResourceNameType getType(); + + abstract String getValue(); + + static ResourceName create(ResourceNameType type, String value) { + return new AutoValue_CallLabels_ResourceName(type, value); + } + } + + abstract Optional<String> getApiClient(); + + public abstract Optional<String> getResourceName(); + + public abstract Optional<String> getAppProfileId(); + + abstract String getMethodName(); + + public abstract Attributes getOtelAttributes(); + + public static CallLabels create(MethodDescriptor<?, ?> method, Metadata headers) { + Optional<String> apiClient = Optional.ofNullable(headers.get(API_CLIENT)); + + String requestParams = Optional.ofNullable(headers.get(REQUEST_PARAMS)).orElse(""); + String[] encodedKvPairs = requestParams.split("&"); + Optional<String> resourceName = extractResourceName(encodedKvPairs).map(ResourceName::getValue); + Optional<String> appProfile = extractAppProfileId(encodedKvPairs); + + return create(method, apiClient, resourceName, appProfile); + } + + public static CallLabels create( + MethodDescriptor<?, ?> method, + Optional<String> apiClient, + Optional<String> resourceName, + Optional<String> appProfile) { + Attributes otelAttrs = + Attributes.builder() + .put(MetricsImpl.API_CLIENT_KEY, apiClient.orElse("")) + .put(MetricsImpl.RESOURCE_KEY, resourceName.orElse("")) + .put(MetricsImpl.APP_PROFILE_KEY, appProfile.orElse("")) + .put(MetricsImpl.METHOD_KEY, method.getFullMethodName()) + .build(); + return new 
AutoValue_CallLabels( + apiClient, resourceName, appProfile, method.getFullMethodName(), otelAttrs); + } + + private static Optional<ResourceName> extractResourceName(String[] encodedKvPairs) { + Optional<ResourceName> resourceName = Optional.empty(); + + for (String encodedKv : encodedKvPairs) { + String[] split = encodedKv.split("=", 2); + if (split.length != 2) { + continue; + } + String encodedKey = split[0]; + String encodedValue = split[1]; + if (encodedKey.isEmpty() || encodedValue.isEmpty()) { + continue; + } + + Optional<ResourceNameType> newType = findType(encodedKey); + + if (newType.isEmpty()) { + continue; + } + // Skip if we previously found a resource name and the new resource name type has a lower + // priority + if (resourceName.isPresent() + && newType.get().priority <= resourceName.get().getType().priority) { + continue; + } + String decodedValue = percentDecode(encodedValue); + + resourceName = Optional.of(ResourceName.create(newType.get(), decodedValue)); + } + return resourceName; + } + + private static Optional<ResourceNameType> findType(String encodedKey) { + String decodedKey = percentDecode(encodedKey); + + for (ResourceNameType type : ResourceNameType.values()) { + if (type.name.equals(decodedKey)) { + return Optional.of(type); + } + } + return Optional.empty(); + } + + private static Optional<String> extractAppProfileId(String[] encodedKvPairs) { + for (String encodedPair : encodedKvPairs) { + if (!encodedPair.startsWith("app_profile_id=")) { + continue; + } + String[] parts = encodedPair.split("=", 2); + String encodedValue = parts.length > 1 ? 
parts[1] : ""; + return Optional.of(percentDecode(encodedValue)); + } + return Optional.empty(); + } + + private static String percentDecode(String s) { + return URLDecoder.decode(s, StandardCharsets.UTF_8); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java new file mode 100644 index 00000000000..fbc7f176a46 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.common.base.Stopwatch; +import io.grpc.CallCredentials; +import io.grpc.InternalMayRequireSpecificExecutor; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; + +public class InstrumentedCallCredentials extends CallCredentials + implements InternalMayRequireSpecificExecutor { + private final CallCredentials inner; + private final boolean specificExecutorRequired; + + public InstrumentedCallCredentials(CallCredentials inner) { + this.inner = inner; + this.specificExecutorRequired = + (inner instanceof InternalMayRequireSpecificExecutor) + && ((InternalMayRequireSpecificExecutor) inner).isSpecificExecutorRequired(); + } + + @Override + public void applyRequestMetadata( + RequestInfo requestInfo, Executor appExecutor, MetadataApplier applier) { + Tracer tracer = Tracer.extractTracerFromCallOptions(requestInfo.getCallOptions()); + final Stopwatch stopwatch = Stopwatch.createStarted(); + + inner.applyRequestMetadata( + requestInfo, + appExecutor, + new MetadataApplier() { + @Override + public void apply(Metadata headers) { + tracer.onCredentialsFetch( + Status.OK, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + applier.apply(headers); + } + + @Override + public void fail(Status status) { + tracer.onCredentialsFetch( + status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + applier.fail(status); + } + }); + } + + @Override + public boolean isSpecificExecutorRequired() { + return specificExecutorRequired; + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java new file mode 100644 index 00000000000..23e358289bf --- /dev/null +++ 
b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -0,0 +1,39 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import io.grpc.Status; +import java.time.Duration; + +public interface Metrics { + + void recordCallStarted(CallLabels labels); + + void recordCredLatency(CallLabels labels, Status status, Duration duration); + + void recordQueueLatency(CallLabels labels, Duration duration); + + void recordRequestSize(CallLabels labels, long size); + + void recordResponseSize(CallLabels labels, long size); + + void recordGfeLatency(CallLabels labels, Duration duration); + + void recordGfeHeaderMissing(CallLabels labels); + + void recordCallLatency(CallLabels labels, Status status, Duration duration); +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java new file mode 100644 index 00000000000..a69db5fd838 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -0,0 +1,230 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.auth.Credentials; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.Closeable; +import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.atomic.AtomicInteger; + +public class MetricsImpl implements Closeable, Metrics { + private static final String METRIC_PREFIX = "bigtableproxy."; + + static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("apiclient"); + static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); + static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + static final AttributeKey METHOD_KEY = 
AttributeKey.stringKey("method"); + static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + + private final SdkMeterProvider meterProvider; + + private final DoubleHistogram gfeLatency; + private final LongCounter gfeResponseHeadersMissing; + private final DoubleHistogram clientCredLatencies; + private final DoubleHistogram clientQueueLatencies; + private final DoubleHistogram clientCallLatencies; + private final LongCounter serverCallsStarted; + private final LongHistogram requestSizes; + private final LongHistogram responseSizes; + + private final AtomicInteger numOutstandingRpcs = new AtomicInteger(); + private final AtomicInteger maxSeen = new AtomicInteger(); + + public MetricsImpl(Credentials credentials, String projectId) throws IOException { + meterProvider = createMeterProvider(credentials, projectId); + Meter meter = meterProvider.meterBuilder("bigtableproxy").build(); + + serverCallsStarted = + meter + .counterBuilder(METRIC_PREFIX + "server.call.started") + .setDescription( + "The total number of RPCs started, including those that have not completed.") + .setUnit("{call}") + .build(); + + clientCredLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.credential.refresh.duration") + .setDescription("Latency of getting credentials") + .setUnit("ms") + .build(); + + clientQueueLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.queue.duration") + .setDescription( + "Duration of how long the outbound side of the proxy had the RPC queued") + .setUnit("ms") + .build(); + + requestSizes = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.sent_total_message_size") + .setDescription( + "Total bytes sent per call to Bigtable service (excluding metadata, grpc and" + + " transport framing bytes)") + .setUnit("by") + .ofLongs() + .build(); + + responseSizes = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.rcvd_total_message_size") + .setDescription( + "Total bytes received per call from Bigtable 
service (excluding metadata, grpc and" + + " transport framing bytes)") + .setUnit("by") + .ofLongs() + .build(); + + gfeLatency = + meter + .histogramBuilder(METRIC_PREFIX + "client.gfe.duration") + .setDescription( + "Latency as measured by Google load balancer from the time it " + + "received the first byte of the request until it received the first byte" + + " of the response from the Cloud Bigtable service.") + .setUnit("ms") + .build(); + + gfeResponseHeadersMissing = + meter + .counterBuilder(METRIC_PREFIX + "client.gfe.duration_missing.count") + .setDescription("Count of calls missing gfe response headers") + .setUnit("{call}") + .build(); + + clientCallLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.call.duration") + .setDescription("Total duration of how long the outbound call took") + .setUnit("ms") + .build(); + + meter + .gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") + .setDescription("Number of concurrent") + .setUnit("{call}") + .ofLongs() + .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); + } + + @Override + public void close() { + meterProvider.close(); + } + + private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) + throws IOException { + MetricServiceSettings.Builder metricServiceSettingsBuilder = MetricServiceSettings.newBuilder(); + metricServiceSettingsBuilder + .setCredentialsProvider(FixedCredentialsProvider.create(credentials)) + .setTransportChannelProvider( + FixedTransportChannelProvider.create( + GrpcTransportChannel.create( + ManagedChannelBuilder.forTarget( + MetricConfiguration.DEFAULT_METRIC_SERVICE_ENDPOINT) + // default 8 KiB + .maxInboundMetadataSize(16 * 1000) + .build()))) + .createMetricDescriptorSettings() + .setSimpleTimeoutNoRetriesDuration( + Duration.ofMillis(MetricConfiguration.DEFAULT_DEADLINE.toMillis())) + .build(); + + MetricConfiguration config = + MetricConfiguration.builder() + .setProjectId(projectId) + 
.setMetricServiceSettings(metricServiceSettingsBuilder.build()) + .setInstrumentationLibraryLabelsEnabled(false) + .build(); + + MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config); + + return SdkMeterProvider.builder() + .registerMetricReader( + PeriodicMetricReader.builder(exporter).setInterval(Duration.ofMinutes(1)).build()) + .build(); + } + + @Override + public void recordCallStarted(CallLabels labels) { + serverCallsStarted.add(1, labels.getOtelAttributes()); + + int outstanding = numOutstandingRpcs.incrementAndGet(); + maxSeen.updateAndGet(n -> Math.max(outstanding, n)); + } + + @Override + public void recordCredLatency(CallLabels labels, Status status, Duration duration) { + Attributes attributes = + labels.getOtelAttributes().toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + clientCredLatencies.record(duration.toMillis(), attributes); + } + + @Override + public void recordQueueLatency(CallLabels labels, Duration duration) { + clientQueueLatencies.record(duration.toMillis(), labels.getOtelAttributes()); + } + + @Override + public void recordRequestSize(CallLabels labels, long size) { + requestSizes.record(size, labels.getOtelAttributes()); + } + + @Override + public void recordResponseSize(CallLabels labels, long size) { + responseSizes.record(size, labels.getOtelAttributes()); + } + + @Override + public void recordGfeLatency(CallLabels labels, Duration duration) { + gfeLatency.record(duration.toMillis(), labels.getOtelAttributes()); + } + + @Override + public void recordGfeHeaderMissing(CallLabels labels) { + gfeResponseHeadersMissing.add(1, labels.getOtelAttributes()); + } + + @Override + public void recordCallLatency(CallLabels labels, Status status, Duration duration) { + Attributes attributes = + labels.getOtelAttributes().toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + + clientCallLatencies.record(duration.toMillis(), attributes); + numOutstandingRpcs.decrementAndGet(); + } +} diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java new file mode 100644 index 00000000000..d7085fbe81e --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -0,0 +1,47 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import io.grpc.Status; +import java.time.Duration; + +public class NoopMetrics implements Metrics { + + @Override + public void recordCallStarted(CallLabels labels) {} + + @Override + public void recordCredLatency(CallLabels labels, Status status, Duration duration) {} + + @Override + public void recordQueueLatency(CallLabels labels, Duration duration) {} + + @Override + public void recordRequestSize(CallLabels labels, long size) {} + + @Override + public void recordResponseSize(CallLabels labels, long size) {} + + @Override + public void recordGfeLatency(CallLabels labels, Duration duration) {} + + @Override + public void recordGfeHeaderMissing(CallLabels labels) {} + + @Override + public void recordCallLatency(CallLabels labels, Status status, Duration duration) {} +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java new file mode 100644 index 00000000000..5db4b684a65 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -0,0 +1,110 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import com.google.common.base.Stopwatch; +import io.grpc.CallOptions; +import io.grpc.CallOptions.Key; +import io.grpc.ClientStreamTracer; +import io.grpc.Metadata; +import io.grpc.Status; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class Tracer extends ClientStreamTracer { + private static final Key<Tracer> CALL_OPTION_KEY = Key.create("bigtable-proxy-tracer"); + + private static final Metadata.Key<String> SERVER_TIMING_HEADER_KEY = + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER); + private static final Pattern SERVER_TIMING_HEADER_PATTERN = Pattern.compile(".*dur=(?<dur>\\d+)"); + + private final Metrics metrics; + private final CallLabels callLabels; + private final Stopwatch stopwatch; + private volatile Optional<Duration> grpcQueueDuration = Optional.empty(); + private final AtomicLong responseSize = new AtomicLong(); + + public Tracer(Metrics metrics, CallLabels callLabels) { + this.metrics = metrics; + this.callLabels = callLabels; + + stopwatch = Stopwatch.createStarted(); + + metrics.recordCallStarted(callLabels); + } + + public CallOptions injectIntoCallOptions(CallOptions callOptions) { + return callOptions + .withOption(CALL_OPTION_KEY, this) + .withStreamTracerFactory( + new Factory() { + @Override + public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) { + return Tracer.this; + } + }); + } + + public static Tracer extractTracerFromCallOptions(CallOptions callOptions) { + return callOptions.getOption(CALL_OPTION_KEY); + } + + @Override + public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) { + grpcQueueDuration = + Optional.of(Duration.of(stopwatch.elapsed(TimeUnit.MICROSECONDS), ChronoUnit.MICROS)); + } + + 
@Override + public void outboundUncompressedSize(long bytes) { + metrics.recordRequestSize(callLabels, bytes); + } + + @Override + public void inboundUncompressedSize(long bytes) { + responseSize.addAndGet(bytes); + } + + @Override + public void inboundHeaders(Metadata headers) { + Optional.ofNullable(headers.get(SERVER_TIMING_HEADER_KEY)) + .map(SERVER_TIMING_HEADER_PATTERN::matcher) + .filter(Matcher::find) + .map(m -> m.group("dur")) + .map(Long::parseLong) + .map(Duration::ofMillis) + .ifPresentOrElse( + d -> metrics.recordGfeLatency(callLabels, d), + () -> metrics.recordGfeHeaderMissing(callLabels)); + } + + public void onCallFinished(Status status) { + grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(callLabels, d)); + metrics.recordResponseSize(callLabels, responseSize.get()); + metrics.recordCallLatency( + callLabels, status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + } + + public void onCredentialsFetch(Status status, Duration duration) { + metrics.recordCredLatency(callLabels, status, duration); + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java new file mode 100644 index 00000000000..40af628ed59 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -0,0 +1,443 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import static org.junit.Assert.assertThrows; +import static org.mockito.AdditionalMatchers.geq; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.verify; + +import com.google.auth.Credentials; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableBlockingStub; +import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.common.collect.Lists; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerBuilder; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import io.grpc.testing.GrpcCleanupRule; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URI; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingDeque; +import 
java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +@RunWith(JUnit4.class) +public class ServeMetricsTest { + @Rule public final MockitoRule mockitoTestRule = MockitoJUnit.rule(); + + @Mock Metrics mockMetrics; + + @Rule + public final GrpcCleanupRule grpcCleanup = new GrpcCleanupRule().setTimeout(1, TimeUnit.MINUTES); + + private MetadataInterceptor serverMetadataInterceptor = new MetadataInterceptor(); + @Spy FakeDataService dataService = new FakeDataService(); + @Spy FakeCredentials fakeCredentials = new FakeCredentials(); + private ManagedChannel fakeServiceChannel; + private Serve serve; + private ManagedChannel proxyChannel; + + @Before + public void setUp() throws Exception { + Server server = grpcCleanup.register(createServer()); + + fakeServiceChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", server.getPort()) + .usePlaintext() + .build() + ); + + serve = createAndStartCommand(fakeServiceChannel, fakeCredentials, mockMetrics); + + proxyChannel = + grpcCleanup.register( + ManagedChannelBuilder.forAddress("localhost", serve.listenPort) + .usePlaintext() + .build() + ); + } + + @After + public void tearDown() throws Exception { + if (serve != null) { + serve.cleanup(); + } + } + + private Server createServer() throws IOException { + for (int i = 10; i >= 0; i--) { + int port; + try (ServerSocket serverSocket = new ServerSocket(0)) { + port = serverSocket.getLocalPort(); + } + try { + return ServerBuilder.forPort(port) + .intercept(serverMetadataInterceptor) + .addService(dataService) + .build() + .start(); + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new 
IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + private static Serve createAndStartCommand( + ManagedChannel targetChannel, FakeCredentials targetCredentials, Metrics metrics) + throws IOException { + for (int i = 10; i >= 0; i--) { + Serve s = new Serve(); + s.dataChannel = targetChannel; + s.adminChannel = targetChannel; + s.credentials = targetCredentials; + s.metrics = metrics; + + try (ServerSocket serverSocket = new ServerSocket(0)) { + s.listenPort = serverSocket.getLocalPort(); + } + + try { + s.start(); + return s; + } catch (IOException e) { + if (i == 0) { + throw e; + } + } + } + throw new IllegalStateException( + "Should never happen, if the server could be started it should've been returned or the last" + + " attempt threw an exception"); + } + + @Test + public void testHappyPath() throws IOException { + serverMetadataInterceptor.responseHeaders = + () -> { + Metadata md = new Metadata(); + md.put(Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER), "dur=1234"); + return md; + }; + + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall<>( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + // inject call labels + headers.put( + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-app-profile")); + headers.put( + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER), + "fake-client"); + + super.start(responseListener, headers); + } + }; + } + }); + + 
doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(dataService) + .checkAndMutateRow(any(), any()); + + doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(fakeCredentials) + .getRequestMetadata(Mockito.any()); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); + + CallLabels expectedLabels = + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")); + + verify(mockMetrics).recordCallStarted(eq(expectedLabels)); + verify(mockMetrics) + .recordCredLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordGfeLatency(eq(expectedLabels), eq(Duration.ofMillis(1234))); + verify(mockMetrics).recordQueueLatency(eq(expectedLabels), geq(Duration.ZERO)); + verify(mockMetrics) + .recordRequestSize(eq(expectedLabels), eq((long) request.getSerializedSize())); + verify(mockMetrics) + .recordResponseSize(eq(expectedLabels), eq((long) response.getSerializedSize())); + verify(mockMetrics) + .recordCallLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(20))); + } + + @Test + public void testMissingGfe() throws IOException { + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall<>( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + // inject call labels + headers.put( + 
Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-app-profile")); + headers.put( + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER), + "fake-client"); + + super.start(responseListener, headers); + } + }; + } + }); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); + + CallLabels expectedLabels = + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")); + + verify(mockMetrics).recordGfeHeaderMissing(eq(expectedLabels)); + } + + @Test + public void testError() throws IOException { + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(proxyChannel) + .withInterceptors( + new ClientInterceptor() { + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, + CallOptions callOptions, + Channel channel) { + return new SimpleForwardingClientCall<>( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + // inject call labels + headers.put( + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-app-profile")); + headers.put( + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER), + "fake-client"); + + super.start(responseListener, headers); + } + }; + } + }); + + doAnswer( + invocation -> { + Thread.sleep(10); + return invocation.callRealMethod(); + }) + .when(fakeCredentials) + 
.getRequestMetadata(Mockito.any()); + + doAnswer( + invocation -> { + Thread.sleep(10); + invocation + .getArgument(1, StreamObserver.class) + .onError(Status.INTERNAL.asRuntimeException()); + return null; + }) + .when(dataService) + .checkAndMutateRow(any(), any()); + + CheckAndMutateRowRequest request = + CheckAndMutateRowRequest.newBuilder() + .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") + .build(); + assertThrows(StatusRuntimeException.class, () -> stub.checkAndMutateRow(request)); + + CallLabels expectedLabels = + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")); + + verify(mockMetrics).recordCallStarted(eq(expectedLabels)); + verify(mockMetrics) + .recordCredLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordQueueLatency(eq(expectedLabels), geq(Duration.ZERO)); + verify(mockMetrics) + .recordRequestSize(eq(expectedLabels), eq((long) request.getSerializedSize())); + verify(mockMetrics).recordResponseSize(eq(expectedLabels), eq(0L)); + verify(mockMetrics) + .recordCallLatency(eq(expectedLabels), eq(Status.INTERNAL), geq(Duration.ofMillis(20))); + } + + static class MetadataInterceptor implements ServerInterceptor { + private BlockingQueue requestHeaders = new LinkedBlockingDeque<>(); + volatile Supplier responseHeaders = Metadata::new; + volatile Supplier responseTrailers = Metadata::new; + + @Override + public Listener interceptCall( + ServerCall call, Metadata metadata, ServerCallHandler next) { + requestHeaders.add(metadata); + return next.startCall( + new SimpleForwardingServerCall(call) { + @Override + public void sendHeaders(Metadata headers) { + headers.merge(responseHeaders.get()); + super.sendHeaders(headers); + } + + @Override + public void close(Status status, Metadata trailers) { + 
trailers.merge(responseTrailers.get()); + super.close(status, trailers); + } + }, + metadata); + } + } + + private static class FakeDataService extends BigtableImplBase { + + @Override + public void checkAndMutateRow( + CheckAndMutateRowRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + CheckAndMutateRowResponse.newBuilder().setPredicateMatched(true).build()); + responseObserver.onCompleted(); + } + } + + private static class FakeCredentials extends Credentials { + private static final String HEADER_NAME = "authorization"; + private String fakeValue = "fake-token"; + + @Override + public String getAuthenticationType() { + return "fake"; + } + + @Override + public Map> getRequestMetadata(URI uri) throws IOException { + return Map.of(HEADER_NAME, Lists.newArrayList(fakeValue)); + } + + @Override + public boolean hasRequestMetadata() { + return true; + } + + @Override + public boolean hasRequestMetadataOnly() { + return true; + } + + @Override + public void refresh() throws IOException { + // noop + } + } +} diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java index 128544c88f6..d3c458ae2d4 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeParsingTest.java @@ -28,9 +28,10 @@ public class ServeParsingTest { @Test public void testMinimalArgs() { Serve serve = new Serve(); - new CommandLine(serve).parseArgs("--listen-port=1234"); + new CommandLine(serve).parseArgs("--listen-port=1234", "--metrics-project-id=fake-project"); assertThat(serve.listenPort).isEqualTo(1234); + assertThat(serve.metricsProjectId).isEqualTo("fake-project"); assertThat(serve.userAgent).isEqualTo("bigtable-java-proxy"); 
assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("bigtable.googleapis.com", 443)); assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("bigtableadmin.googleapis.com", 443)); @@ -40,7 +41,10 @@ public void testMinimalArgs() { public void testDataEndpointOverride() { Serve serve = new Serve(); new CommandLine(serve) - .parseArgs("--listen-port=1234", "--bigtable-data-endpoint=example.com:1234"); + .parseArgs( + "--listen-port=1234", + "--metrics-project-id=fake-project", + "--bigtable-data-endpoint=example.com:1234"); assertThat(serve.listenPort).isEqualTo(1234); assertThat(serve.dataEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); @@ -50,9 +54,20 @@ public void testDataEndpointOverride() { public void testAdminDataEndpointOverride() { Serve serve = new Serve(); new CommandLine(serve) - .parseArgs("--listen-port=1234", "--bigtable-admin-endpoint=example.com:1234"); + .parseArgs( + "--listen-port=1234", + "--metrics-project-id=fake-project", + "--bigtable-admin-endpoint=example.com:1234"); assertThat(serve.listenPort).isEqualTo(1234); assertThat(serve.adminEndpoint).isEqualTo(Endpoint.create("example.com", 1234)); } + + @Test + public void testMetricsProjectIdOverride() { + Serve serve = new Serve(); + new CommandLine(serve) + .parseArgs("--listen-port=1234", "--metrics-project-id=other-fake-project"); + assertThat(serve.metricsProjectId).isEqualTo("other-fake-project"); + } } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java index c55cc2bcec4..69be009dd5b 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeTest.java @@ -37,6 +37,7 @@ import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; import 
com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.cloud.bigtable.examples.proxy.metrics.NoopMetrics; import com.google.common.collect.Lists; import com.google.common.collect.Range; import com.google.common.util.concurrent.ListenableFuture; @@ -453,6 +454,7 @@ private static Serve createAndStartCommand( s.dataChannel = targetChannel; s.adminChannel = targetChannel; s.credentials = targetCredentials; + s.metrics = new NoopMetrics(); try (ServerSocket serverSocket = new ServerSocket(0)) { s.listenPort = serverSocket.getLocalPort(); diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java new file mode 100644 index 00000000000..0de81769ca3 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java @@ -0,0 +1,157 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import static com.google.common.truth.Truth.assertAbout; +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.common.truth.FailureMetadata; +import com.google.common.truth.MapSubject; +import com.google.common.truth.Subject; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.opentelemetry.api.common.AttributeKey; +import java.util.Optional; +import org.jspecify.annotations.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class CallLabelsTest { + private static final Key REQUEST_PARAMS = + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); + private static final Key API_CLIENT = + Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); + + @Test + public void testAllBasic() { + Metadata md = new Metadata(); + md.put(REQUEST_PARAMS, "table_name=projects/p/instances/i/tables/t&app_profile_id=a"); + md.put(API_CLIENT, "some-client"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getApiClient()).isEqualTo(Optional.of("some-client")); + assertThat(callLabels.getAppProfileId()).isEqualTo(Optional.of("a")); + assertThat(callLabels.getResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + + CallLabelsSubject.assertThat(callLabels) + .hasOtelAttributesThat() + .containsAtLeast( + AttributeKey.stringKey("apiclient"), "some-client", + AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t", + AttributeKey.stringKey("app_profile"), "a", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } + + @Test + public void testResourceEscaped() { + Metadata md = new Metadata(); + md.put(REQUEST_PARAMS, "table_name=projects/p/instances/i/tables/t".replace("/", "%2F")); + CallLabels callLabels = 
CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + CallLabelsSubject.assertThat(callLabels) + .hasOtelAttributesThat() + .containsAtLeast(AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t"); + } + + @Test + public void testEmpty() { + Metadata md = new Metadata(); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + CallLabelsSubject.assertThat(callLabels) + .hasOtelAttributesThat() + .containsAtLeast( + AttributeKey.stringKey("apiclient"), "", + AttributeKey.stringKey("resource"), "", + AttributeKey.stringKey("app_profile"), "", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } + + @Test + public void testMalformed1() { + Metadata md = new Metadata(); + md.put(REQUEST_PARAMS, "table_name="); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + CallLabelsSubject.assertThat(callLabels) + .hasOtelAttributesThat() + .containsAtLeast(AttributeKey.stringKey("resource"), ""); + } + + @Test + public void testMalformed2() { + Metadata md = new Metadata(); + md.put(REQUEST_PARAMS, "&"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + CallLabelsSubject.assertThat(callLabels) + .hasOtelAttributesThat() + .containsAtLeast(AttributeKey.stringKey("resource"), ""); + } + + @Test + public void testMalformed3() { + Metadata md = new Metadata(); + md.put(REQUEST_PARAMS, "table_name=&"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); + + assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + CallLabelsSubject.assertThat(callLabels) + 
.hasOtelAttributesThat() + .containsAtLeast(AttributeKey.stringKey("resource"), ""); + } + + private static class CallLabelsSubject extends Subject { + private final CallLabels actual; + + public CallLabelsSubject(FailureMetadata metadata, @Nullable CallLabels actual) { + super(metadata, actual); + this.actual = actual; + } + + public static Factory callLabels() { + return CallLabelsSubject::new; + } + + public static CallLabelsSubject assertThat(CallLabels callLabels) { + return assertAbout(callLabels()).that(callLabels); + } + + public MapSubject hasOtelAttributesThat() { + return check("getOtelAttributes()").that(actual.getOtelAttributes().asMap()); + } + + public void hasMethodName(String method) { + check("getMethodName()").that(actual.getMethodName()).isEqualTo(method); + } + + public void hasResourceName(String resourceName) { + check("hasResourceName()") + .that(actual.getResourceName()) + .isEqualTo(Optional.of(resourceName)); + } + } +} From 00b8db64b44657b428a0790bc5a99ec61be459f5 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 17:01:10 -0500 Subject: [PATCH 22/66] feat: add channel pooling (#9692) stack-info: PR: https://github.com/GoogleCloudPlatform/java-docs-samples/pull/9692, branch: igorbernstein2/stack/5 --- .../proxy/channelpool/ChannelFactory.java | 35 + .../proxy/channelpool/ChannelPool.java | 591 +++++++++++++ .../channelpool/ChannelPoolSettings.java | 169 ++++ .../proxy/channelpool/DataChannel.java | 222 +++++ .../proxy/channelpool/ResourceCollector.java | 59 ++ .../examples/proxy/commands/Serve.java | 60 +- .../examples/proxy/metrics/CallLabels.java | 1 - .../examples/proxy/metrics/Metrics.java | 2 + .../examples/proxy/metrics/MetricsImpl.java | 18 +- .../examples/proxy/metrics/NoopMetrics.java | 3 + .../examples/proxy/metrics/Tracer.java | 4 + .../proxy/channelpool/ChannelPoolTest.java | 804 ++++++++++++++++++ 12 files changed, 1948 insertions(+), 20 deletions(-) create mode 100644 
bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java new file mode 100644 index 00000000000..10c68d7d9e7 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelFactory.java @@ -0,0 +1,35 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Copied from +// https://github.com/googleapis/sdk-platform-java/blob/a333b0709023c971f12a85e5287b6d77d1b57c48/gax-java/gax-grpc/src/main/java/com/google/api/gax/grpc/ChannelFactory.java +// Changes: +// - package name +// - removed InternalApi annotation + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import io.grpc.ManagedChannel; +import java.io.IOException; + +/** + * This interface represents a factory for creating one ManagedChannel + * + *

This is public only for technical reasons, for advanced usage. + */ +public interface ChannelFactory { + ManagedChannel createSingleChannel() throws IOException; +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java new file mode 100644 index 00000000000..188dd1bf131 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java @@ -0,0 +1,591 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.api.core.InternalApi; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** + * A {@link ManagedChannel} that will send requests round-robin via a set of channels. + * + *

In addition to spreading requests over a set of child connections, the pool will also actively + * manage the lifecycle of the channels. Currently lifecycle management is limited to pre-emptively + * replacing channels every hour. In the future it will dynamically size the pool based on number of + * outstanding requests. + * + *

Package-private for internal use. + */ +public class ChannelPool extends ManagedChannel { + @VisibleForTesting static final Logger LOG = Logger.getLogger(ChannelPool.class.getName()); + private static final java.time.Duration REFRESH_PERIOD = java.time.Duration.ofMinutes(50); + + private final ChannelPoolSettings settings; + private final ChannelFactory channelFactory; + private final ScheduledExecutorService executor; + + private final Object entryWriteLock = new Object(); + @VisibleForTesting final AtomicReference> entries = new AtomicReference<>(); + private final AtomicInteger indexTicker = new AtomicInteger(); + private final String authority; + + public static ChannelPool create(ChannelPoolSettings settings, ChannelFactory channelFactory) + throws IOException { + return new ChannelPool(settings, channelFactory, Executors.newSingleThreadScheduledExecutor()); + } + + /** + * Initializes the channel pool. Assumes that all channels have the same authority. + * + * @param settings options for controling the ChannelPool sizing behavior + * @param channelFactory method to create the channels + * @param executor periodically refreshes the channels + */ + @VisibleForTesting + ChannelPool( + ChannelPoolSettings settings, + ChannelFactory channelFactory, + ScheduledExecutorService executor) + throws IOException { + this.settings = settings; + this.channelFactory = channelFactory; + + ImmutableList.Builder initialListBuilder = ImmutableList.builder(); + + for (int i = 0; i < settings.getInitialChannelCount(); i++) { + initialListBuilder.add(new Entry(channelFactory.createSingleChannel())); + } + + entries.set(initialListBuilder.build()); + authority = entries.get().get(0).channel.authority(); + this.executor = executor; + + if (!settings.isStaticSize()) { + executor.scheduleAtFixedRate( + this::resizeSafely, + ChannelPoolSettings.RESIZE_INTERVAL.getSeconds(), + ChannelPoolSettings.RESIZE_INTERVAL.getSeconds(), + TimeUnit.SECONDS); + } + if 
(settings.isPreemptiveRefreshEnabled()) { + executor.scheduleAtFixedRate( + this::refreshSafely, + REFRESH_PERIOD.getSeconds(), + REFRESH_PERIOD.getSeconds(), + TimeUnit.SECONDS); + } + } + + /** {@inheritDoc} */ + @Override + public String authority() { + return authority; + } + + /** + * Create a {@link ClientCall} on a Channel from the pool chosen in a round-robin fashion to the + * remote operation specified by the given {@link MethodDescriptor}. The returned {@link + * ClientCall} does not trigger any remote behavior until {@link + * ClientCall#start(ClientCall.Listener, io.grpc.Metadata)} is invoked. + */ + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return getChannel(indexTicker.getAndIncrement()).newCall(methodDescriptor, callOptions); + } + + Channel getChannel(int affinity) { + return new AffinityChannel(affinity); + } + + /** {@inheritDoc} */ + @Override + public ManagedChannel shutdown() { + LOG.fine("Initiating graceful shutdown due to explicit request"); + + List localEntries = entries.get(); + for (Entry entry : localEntries) { + entry.channel.shutdown(); + } + if (executor != null) { + // shutdownNow will cancel scheduled tasks + executor.shutdownNow(); + } + return this; + } + + /** {@inheritDoc} */ + @Override + public boolean isShutdown() { + List localEntries = entries.get(); + for (Entry entry : localEntries) { + if (!entry.channel.isShutdown()) { + return false; + } + } + return executor == null || executor.isShutdown(); + } + + /** {@inheritDoc} */ + @Override + public boolean isTerminated() { + List localEntries = entries.get(); + for (Entry entry : localEntries) { + if (!entry.channel.isTerminated()) { + return false; + } + } + + return executor == null || executor.isTerminated(); + } + + /** {@inheritDoc} */ + @Override + public ManagedChannel shutdownNow() { + LOG.fine("Initiating immediate shutdown due to explicit request"); + + List localEntries = entries.get(); + for (Entry 
entry : localEntries) { + entry.channel.shutdownNow(); + } + if (executor != null) { + executor.shutdownNow(); + } + return this; + } + + /** {@inheritDoc} */ + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + long endTimeNanos = System.nanoTime() + unit.toNanos(timeout); + List localEntries = entries.get(); + for (Entry entry : localEntries) { + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + if (awaitTimeNanos <= 0) { + break; + } + entry.channel.awaitTermination(awaitTimeNanos, TimeUnit.NANOSECONDS); + } + if (executor != null) { + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + executor.awaitTermination(awaitTimeNanos, TimeUnit.NANOSECONDS); + } + return isTerminated(); + } + + private void resizeSafely() { + try { + synchronized (entryWriteLock) { + resize(); + } + } catch (Exception e) { + LOG.log(Level.WARNING, "Failed to resize channel pool", e); + } + } + + /** + * Resize the number of channels based on the number of outstanding RPCs. + * + *

This method is expected to be called on a fixed interval. On every invocation it will: + * + *

    + *
  • Get the maximum number of outstanding RPCs since last invocation + *
  • Determine a valid range of number of channels to handle that many outstanding RPCs + *
  • If the current number of channel falls outside of that range, add or remove at most + * {@link ChannelPoolSettings#MAX_RESIZE_DELTA} to get closer to middle of that range. + *
+ * + *

Not threadsafe, must be called under the entryWriteLock monitor + */ + @VisibleForTesting + void resize() { + List localEntries = entries.get(); + // Estimate the peak of RPCs in the last interval by summing the peak of RPCs per channel + int actualOutstandingRpcs = + localEntries.stream().mapToInt(Entry::getAndResetMaxOutstanding).sum(); + + // Number of channels if each channel operated at max capacity + int minChannels = + (int) Math.ceil(actualOutstandingRpcs / (double) settings.getMaxRpcsPerChannel()); + // Limit the threshold to absolute range + if (minChannels < settings.getMinChannelCount()) { + minChannels = settings.getMinChannelCount(); + } + + // Number of channels if each channel operated at minimum capacity + // Note: getMinRpcsPerChannel() can return 0, but division by 0 shouldn't cause a problem. + int maxChannels = + (int) Math.ceil(actualOutstandingRpcs / (double) settings.getMinRpcsPerChannel()); + // Limit the threshold to absolute range + if (maxChannels > settings.getMaxChannelCount()) { + maxChannels = settings.getMaxChannelCount(); + } + if (maxChannels < minChannels) { + maxChannels = minChannels; + } + + // If the pool were to be resized, try to aim for the middle of the bound, but limit rate of + // change. 
+ int tentativeTarget = (maxChannels + minChannels) / 2; + int currentSize = localEntries.size(); + int delta = tentativeTarget - currentSize; + int dampenedTarget = tentativeTarget; + if (Math.abs(delta) > ChannelPoolSettings.MAX_RESIZE_DELTA) { + dampenedTarget = + currentSize + (int) Math.copySign(ChannelPoolSettings.MAX_RESIZE_DELTA, delta); + } + + // Only resize the pool when thresholds are crossed + if (localEntries.size() < minChannels) { + LOG.fine( + String.format( + "Detected throughput peak of %d, expanding channel pool size: %d -> %d.", + actualOutstandingRpcs, currentSize, dampenedTarget)); + + expand(dampenedTarget); + } else if (localEntries.size() > maxChannels) { + LOG.fine( + String.format( + "Detected throughput drop to %d, shrinking channel pool size: %d -> %d.", + actualOutstandingRpcs, currentSize, dampenedTarget)); + + shrink(dampenedTarget); + } + } + + /** Not threadsafe, must be called under the entryWriteLock monitor */ + private void shrink(int desiredSize) { + ImmutableList localEntries = entries.get(); + Preconditions.checkState( + localEntries.size() >= desiredSize, "current size is already smaller than the desired"); + + // Set the new list + entries.set(localEntries.subList(0, desiredSize)); + // clean up removed entries + List removed = localEntries.subList(desiredSize, localEntries.size()); + removed.forEach(Entry::requestShutdown); + } + + /** Not threadsafe, must be called under the entryWriteLock monitor */ + private void expand(int desiredSize) { + List localEntries = entries.get(); + Preconditions.checkState( + localEntries.size() <= desiredSize, "current size is already bigger than the desired"); + + ImmutableList.Builder newEntries = ImmutableList.builder().addAll(localEntries); + + for (int i = 0; i < desiredSize - localEntries.size(); i++) { + try { + newEntries.add(new Entry(channelFactory.createSingleChannel())); + } catch (IOException e) { + LOG.log(Level.WARNING, "Failed to add channel", e); + } + } + + 
entries.set(newEntries.build()); + } + + private void refreshSafely() { + try { + refresh(); + } catch (Exception e) { + LOG.log(Level.WARNING, "Failed to pre-emptively refresh channnels", e); + } + } + + /** + * Replace all of the channels in the channel pool with fresh ones. This is meant to mitigate the + * hourly GFE disconnects by giving clients the ability to prime the channel on reconnect. + * + *

This is done on a best effort basis. If the replacement channel fails to construct, the old + * channel will continue to be used. + */ + @InternalApi("Visible for testing") + void refresh() { + // Note: synchronization is necessary in case refresh is called concurrently: + // - thread1 fails to replace a single entry + // - thread2 succeeds replacing an entry + // - thread1 loses the race to replace the list + // - then thread2 will shut down channel that thread1 will put back into circulation (after it + // replaces the list) + synchronized (entryWriteLock) { + LOG.fine("Refreshing all channels"); + ArrayList newEntries = new ArrayList<>(entries.get()); + + for (int i = 0; i < newEntries.size(); i++) { + try { + newEntries.set(i, new Entry(channelFactory.createSingleChannel())); + } catch (IOException e) { + LOG.log(Level.WARNING, "Failed to refresh channel, leaving old channel", e); + } + } + + ImmutableList replacedEntries = entries.getAndSet(ImmutableList.copyOf(newEntries)); + + // Shutdown the channels that were cycled out. + for (Entry e : replacedEntries) { + if (!newEntries.contains(e)) { + e.requestShutdown(); + } + } + } + } + + /** + * Get and retain a Channel Entry. The returned Entry will have its rpc count incremented, + * preventing it from getting recycled. + */ + Entry getRetainedEntry(int affinity) { + // The maximum number of concurrent calls to this method for any given time span is at most 2, + // so the loop can actually be 2 times. But going for 5 times for a safety margin for potential + // code evolving + for (int i = 0; i < 5; i++) { + Entry entry = getEntry(affinity); + if (entry.retain()) { + return entry; + } + } + // It is unlikely to reach here unless the pool code evolves to increase the maximum possible + // concurrent calls to this method. If it does, this is a bug in the channel pool implementation + // the number of retries above should be greater than the number of contending maintenance + // tasks. 
+ throw new IllegalStateException("Bug: failed to retain a channel"); + } + + /** + * Returns one of the channels managed by this pool. The pool continues to "own" the channel, and + * the caller should not shut it down. + * + * @param affinity Two calls to this method with the same affinity returns the same channel most + * of the time, if the channel pool was refreshed since the last call, a new channel will be + * returned. The reverse is not true: Two calls with different affinities might return the + * same channel. However, the implementation should attempt to spread load evenly. + */ + private Entry getEntry(int affinity) { + List localEntries = entries.get(); + + int index = Math.abs(affinity % localEntries.size()); + + return localEntries.get(index); + } + + /** Bundles a gRPC {@link ManagedChannel} with some usage accounting. */ + static class Entry { + private final ManagedChannel channel; + + /** + * The primary purpose of keeping a count for outstanding RPCs is to track when a channel is + * safe to close. In grpc, initialization & starting of rpcs is split between 2 methods: + * Channel#newCall() and ClientCall#start. gRPC already has a mechanism to safely close channels + * that have rpcs that have been started. However, it does not protect calls that have been + * created but not started. In the sequence: Channel#newCall() Channel#shutdown() + * ClientCall#Start(), gRpc will error out the call telling the caller that the channel is + * shutdown. + * + *

Hence, the increment of outstanding RPCs has to happen when the ClientCall is initialized, + * as part of Channel#newCall(), not after the ClientCall is started. The decrement of + * outstanding RPCs has to happen when the ClientCall is closed or the ClientCall failed to + * start. + */ + @VisibleForTesting final AtomicInteger outstandingRpcs = new AtomicInteger(0); + + private final AtomicInteger maxOutstanding = new AtomicInteger(); + + // Flag that the channel should be closed once all of the outstanding RPC complete. + private final AtomicBoolean shutdownRequested = new AtomicBoolean(); + // Flag that the channel has been closed. + private final AtomicBoolean shutdownInitiated = new AtomicBoolean(); + + private Entry(ManagedChannel channel) { + this.channel = channel; + } + + int getAndResetMaxOutstanding() { + return maxOutstanding.getAndSet(outstandingRpcs.get()); + } + + /** + * Try to increment the outstanding RPC count. The method will return false if the channel is + * closing and the caller should pick a different channel. If the method returned true, the + * channel has been successfully retained and it is the responsibility of the caller to release + * it. + */ + private boolean retain() { + // register desire to start RPC + int currentOutstanding = outstandingRpcs.incrementAndGet(); + + // Rough book keeping + int prevMax = maxOutstanding.get(); + if (currentOutstanding > prevMax) { + maxOutstanding.incrementAndGet(); + } + + // abort if the channel is closing + if (shutdownRequested.get()) { + release(); + return false; + } + return true; + } + + /** + * Notify the channel that the number of outstanding RPCs has decreased. If shutdown has been + * previously requested, this method will shutdown the channel if its the last outstanding RPC. + */ + private void release() { + int newCount = outstandingRpcs.decrementAndGet(); + if (newCount < 0) { + LOG.log(Level.WARNING, "Bug! 
Reference count is negative (" + newCount + ")!"); + } + + // Must check outstandingRpcs after shutdownRequested (in reverse order of retain()) to ensure + // mutual exclusion. + if (shutdownRequested.get() && outstandingRpcs.get() == 0) { + shutdown(); + } + } + + /** + * Request a shutdown. The actual shutdown will be delayed until there are no more outstanding + * RPCs. + */ + private void requestShutdown() { + shutdownRequested.set(true); + if (outstandingRpcs.get() == 0) { + shutdown(); + } + } + + /** Ensure that shutdown is only called once. */ + private void shutdown() { + if (shutdownInitiated.compareAndSet(false, true)) { + channel.shutdown(); + } + } + } + + /** Thin wrapper to ensure that new calls are properly reference counted. */ + private class AffinityChannel extends Channel { + private final int affinity; + + public AffinityChannel(int affinity) { + this.affinity = affinity; + } + + @Override + public String authority() { + return authority; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + + Entry entry = getRetainedEntry(affinity); + + return new ReleasingClientCall<>(entry.channel.newCall(methodDescriptor, callOptions), entry); + } + } + + /** ClientCall wrapper that makes sure to decrement the outstanding RPC count on completion. 
*/ + static class ReleasingClientCall extends SimpleForwardingClientCall { + @Nullable private CancellationException cancellationException; + final Entry entry; + private final AtomicBoolean wasClosed = new AtomicBoolean(); + private final AtomicBoolean wasReleased = new AtomicBoolean(); + + public ReleasingClientCall(ClientCall delegate, Entry entry) { + super(delegate); + this.entry = entry; + } + + @Override + public void start(Listener responseListener, Metadata headers) { + if (cancellationException != null) { + throw new IllegalStateException("Call is already cancelled", cancellationException); + } + try { + super.start( + new SimpleForwardingClientCallListener(responseListener) { + @Override + public void onClose(Status status, Metadata trailers) { + if (!wasClosed.compareAndSet(false, true)) { + LOG.log( + Level.WARNING, + "Call is being closed more than once. Please make sure that onClose() is" + + " not being manually called."); + return; + } + try { + super.onClose(status, trailers); + } finally { + if (wasReleased.compareAndSet(false, true)) { + entry.release(); + } else { + LOG.log( + Level.WARNING, + "Entry was released before the call is closed. This may be due to an" + + " exception on start of the call."); + } + } + } + }, + headers); + } catch (Exception e) { + // In case start failed, make sure to release + if (wasReleased.compareAndSet(false, true)) { + entry.release(); + } else { + LOG.log( + Level.WARNING, + "The entry is already released. 
This indicates that onClose() has already been" + + " called previously"); + } + throw e; + } + } + + @Override + public void cancel(@Nullable String message, @Nullable Throwable cause) { + this.cancellationException = new CancellationException(message); + super.cancel(message, cause); + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java new file mode 100644 index 00000000000..6788e95f485 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolSettings.java @@ -0,0 +1,169 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.api.core.BetaApi; +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import java.time.Duration; + +/** + * Settings to control {@link ChannelPool} behavior. + * + *

To facilitate low latency/high throughout applications, gax provides a {@link ChannelPool}. + * The pool is meant to facilitate high throughput/low latency clients. By splitting load across + * multiple gRPC channels the client can spread load across multiple frontends and overcome gRPC's + * limit of 100 concurrent RPCs per channel. However oversizing the {@link ChannelPool} can lead to + * underutilized channels which will lead to high tail latency due to GFEs disconnecting idle + * channels. + * + *

The {@link ChannelPool} is designed to adapt to varying traffic patterns by tracking + * outstanding RPCs and resizing the pool size. This class configures the behavior. In general + * clients should aim to have less than 50 concurrent RPCs per channel and at least 1 outstanding + * per channel per minute. + * + *

The settings in this class will be applied every minute. + */ +@BetaApi("surface for channel pool sizing is not yet stable") +@AutoValue +public abstract class ChannelPoolSettings { + /** How often to check and possibly resize the {@link ChannelPool}. */ + static final Duration RESIZE_INTERVAL = Duration.ofMinutes(1); + /** The maximum number of channels that can be added or removed at a time. */ + static final int MAX_RESIZE_DELTA = 2; + + /** + * Threshold to start scaling down the channel pool. + * + *

When the average of the maximum number of outstanding RPCs in a single minute drop below + * this threshold, channels will be removed from the pool. + */ + public abstract int getMinRpcsPerChannel(); + + /** + * Threshold to start scaling up the channel pool. + * + *

When the average of the maximum number of outstanding RPCs in a single minute surpass this + * threshold, channels will be added to the pool. For google services, gRPC channels will start + * locally queuing RPC when there are 100 concurrent RPCs. + */ + public abstract int getMaxRpcsPerChannel(); + + /** + * The absolute minimum size of the channel pool. + * + *

Regardless of the current throughput, the number of channels will not drop below this limit + */ + public abstract int getMinChannelCount(); + + /** + * The absolute maximum size of the channel pool. + * + *

Regardless of the current throughput, the number of channels will not exceed this limit + */ + public abstract int getMaxChannelCount(); + + /** + * The initial size of the channel pool. + * + *

During client construction the client open this many connections. This will be scaled up or + * down in the next period. + */ + public abstract int getInitialChannelCount(); + + /** + * If all of the channels should be replaced on an hourly basis. + * + *

The GFE will forcibly disconnect active channels after an hour. To minimize the cost of + * reconnects, this will create a new channel asynchronuously, prime it and then swap it with an + * old channel. + */ + public abstract boolean isPreemptiveRefreshEnabled(); + + /** Helper to check if the {@link ChannelPool} implementation can skip dynamic size logic */ + boolean isStaticSize() { + // When range is restricted to a single size + if (getMinChannelCount() == getMaxChannelCount()) { + return true; + } + // When the scaling threshold are not set + if (getMinRpcsPerChannel() == 0 && getMaxRpcsPerChannel() == Integer.MAX_VALUE) { + return true; + } + + return false; + } + + public abstract Builder toBuilder(); + + public static ChannelPoolSettings staticallySized(int size) { + return builder() + .setInitialChannelCount(size) + .setMinRpcsPerChannel(0) + .setMaxRpcsPerChannel(Integer.MAX_VALUE) + .setMinChannelCount(size) + .setMaxChannelCount(size) + .build(); + } + + public static Builder builder() { + return new AutoValue_ChannelPoolSettings.Builder() + .setInitialChannelCount(1) + .setMinChannelCount(1) + .setMaxChannelCount(200) + .setMinRpcsPerChannel(0) + .setMaxRpcsPerChannel(Integer.MAX_VALUE) + .setPreemptiveRefreshEnabled(false); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setMinRpcsPerChannel(int count); + + public abstract Builder setMaxRpcsPerChannel(int count); + + public abstract Builder setMinChannelCount(int count); + + public abstract Builder setMaxChannelCount(int count); + + public abstract Builder setInitialChannelCount(int count); + + public abstract Builder setPreemptiveRefreshEnabled(boolean enabled); + + abstract ChannelPoolSettings autoBuild(); + + public ChannelPoolSettings build() { + ChannelPoolSettings s = autoBuild(); + + Preconditions.checkState( + s.getMinRpcsPerChannel() <= s.getMaxRpcsPerChannel(), "rpcsPerChannel range is invalid"); + Preconditions.checkState( + 
s.getMinChannelCount() > 0, "Minimum channel count must be at least 1"); + Preconditions.checkState( + s.getMinChannelCount() <= s.getMaxRpcsPerChannel(), "absolute channel range is invalid"); + Preconditions.checkState( + s.getMinChannelCount() <= s.getInitialChannelCount(), + "initial channel count be at least minChannelCount"); + Preconditions.checkState( + s.getInitialChannelCount() <= s.getMaxChannelCount(), + "initial channel count must be less than maxChannelCount"); + Preconditions.checkState( + s.getInitialChannelCount() > 0, "Initial channel count must be greater than 0"); + return s; + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java new file mode 100644 index 00000000000..ca6db6c83e5 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -0,0 +1,222 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableFutureStub; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; +import com.google.common.util.concurrent.ListenableFuture; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ConnectivityState; +import io.grpc.Deadline; +import io.grpc.ExperimentalApi; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DataChannel extends ManagedChannel { + private static final Logger LOGGER = LoggerFactory.getLogger(DataChannel.class); + + private final ManagedChannel inner; + private final Metrics metrics; + private final ResourceCollector resourceCollector; + private final BigtableFutureStub warmingStub; + private final ScheduledFuture antiIdleTask; + + private final AtomicBoolean closed = new AtomicBoolean(); + + public DataChannel( + ResourceCollector resourceCollector, + String userAgent, + CallCredentials callCredentials, + String endpoint, + int port, + ScheduledExecutorService warmingExecutor, + Metrics metrics) { + this.resourceCollector = resourceCollector; + + inner = + ManagedChannelBuilder.forAddress(endpoint, port) + .userAgent(userAgent) + .disableRetry() + 
.maxInboundMessageSize(256 * 1024 * 1024) + .keepAliveTime(30, TimeUnit.SECONDS) + .keepAliveTimeout(10, TimeUnit.SECONDS) + .build(); + this.metrics = metrics; + + try { + warmingStub = BigtableGrpc.newFutureStub(inner).withCallCredentials(callCredentials); + + warm(); + } catch (RuntimeException e) { + try { + inner.shutdown(); + } catch (RuntimeException e2) { + e.addSuppressed(e2); + } + throw e; + } + + antiIdleTask = warmingExecutor.scheduleAtFixedRate(this::warmQuietly, 3, 3, TimeUnit.MINUTES); + metrics.updateChannelCount(1); + } + + private void warmQuietly() { + try { + warm(); + } catch (RuntimeException e) { + LOGGER.warn("anti idle ping failed, forcing reconnect", e); + inner.enterIdle(); + } + } + + private void warm() { + List requests = resourceCollector.getRequests(); + if (requests.isEmpty()) { + return; + } + + BigtableFutureStub timedStub = warmingStub.withDeadline(Deadline.after(1, TimeUnit.MINUTES)); + + List> futures = + requests.stream().map(timedStub::pingAndWarm).collect(Collectors.toList()); + + int successCount = 0; + int failures = 0; + for (ListenableFuture future : futures) { + PingAndWarmRequest request = requests.get(successCount + failures); + try { + future.get(); + successCount++; + } catch (ExecutionException e) { + // All permenant errors are ignored and treated as a success + // The priming request for that generated the error will be dropped + if (e.getCause() instanceof StatusRuntimeException) { + StatusRuntimeException se = (StatusRuntimeException) e.getCause(); + switch (se.getStatus().getCode()) { + case INTERNAL: + case PERMISSION_DENIED: + case NOT_FOUND: + case UNAUTHENTICATED: + successCount++; + // drop the priming request for permenant errors + resourceCollector.evict(request); + continue; + default: + // noop + } + } + LOGGER.warn("Failed to prime channel with request: {}", request, e.getCause()); + failures++; + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted while priming channel with 
request: " + request, e); + } + } + if (successCount < failures) { + throw new RuntimeException("Most of the priming requests failed"); + } + } + + @Override + public ManagedChannel shutdown() { + if (closed.compareAndSet(false, true)) { + metrics.updateChannelCount(-1); + } + antiIdleTask.cancel(true); + return inner.shutdown(); + } + + @Override + public boolean isShutdown() { + return inner.isShutdown(); + } + + @Override + public boolean isTerminated() { + return inner.isTerminated(); + } + + @Override + public ManagedChannel shutdownNow() { + if (closed.compareAndSet(false, true)) { + metrics.updateChannelCount(-1); + } + antiIdleTask.cancel(true); + return inner.shutdownNow(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return inner.awaitTermination(timeout, unit); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4359") + @Override + public ConnectivityState getState(boolean requestConnection) { + return inner.getState(requestConnection); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4359") + @Override + public void notifyWhenStateChanged(ConnectivityState source, Runnable callback) { + inner.notifyWhenStateChanged(source, callback); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4056") + @Override + public void resetConnectBackoff() { + inner.resetConnectBackoff(); + } + + @ExperimentalApi("https://github.com/grpc/grpc-java/issues/4056") + @Override + public void enterIdle() { + inner.enterIdle(); + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + Optional.ofNullable(Tracer.extractTracerFromCallOptions(callOptions)) + .map(Tracer::getCallLabels) + .ifPresent(resourceCollector::collect); + + return inner.newCall(methodDescriptor, callOptions); + } + + @Override + public String authority() { + return inner.authority(); + } +} diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java new file mode 100644 index 00000000000..29beecf89e7 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.ImmutableList; +import java.time.Duration; +import java.util.List; + +public class ResourceCollector { + private final Cache warmingRequests = + CacheBuilder.newBuilder().expireAfterWrite(Duration.ofHours(1)).maximumSize(100).build(); + + public void collect(CallLabels labels) { + String[] splits = labels.getResourceName().orElse("").split("/", 5); + if (splits.length <= 4) { + return; + } + if (!"projects".equals(splits[0])) { + return; + } + if (!"instances".equals(splits[2])) { + return; + } + String appProfile = labels.getAppProfileId().orElse(""); + + PingAndWarmRequest req = + PingAndWarmRequest.newBuilder() + .setName("projects/" + splits[1] + "/instances/" + splits[3]) + .setAppProfileId(appProfile) + .build(); + warmingRequests.put(req, true); + } + + public List getRequests() { + return ImmutableList.copyOf(warmingRequests.asMap().keySet()); + } + + public void evict(PingAndWarmRequest request) { + warmingRequests.invalidate(request); + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index 5c62550d28c..a3ed0c9a50f 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -21,6 +21,10 @@ import com.google.bigtable.admin.v2.BigtableInstanceAdminGrpc; import com.google.bigtable.admin.v2.BigtableTableAdminGrpc; import com.google.bigtable.v2.BigtableGrpc; +import com.google.cloud.bigtable.examples.proxy.channelpool.ChannelPool; 
+import com.google.cloud.bigtable.examples.proxy.channelpool.ChannelPoolSettings; +import com.google.cloud.bigtable.examples.proxy.channelpool.DataChannel; +import com.google.cloud.bigtable.examples.proxy.channelpool.ResourceCollector; import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler; import com.google.cloud.bigtable.examples.proxy.core.Registry; import com.google.cloud.bigtable.examples.proxy.metrics.InstrumentedCallCredentials; @@ -40,7 +44,8 @@ import java.net.InetSocketAddress; import java.util.Map; import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -83,6 +88,7 @@ public class Serve implements Callable { Credentials credentials = null; Server server; Metrics metrics; + private ScheduledExecutorService refreshExecutor; @Override public Void call() throws Exception { @@ -93,16 +99,44 @@ public Void call() throws Exception { } void start() throws IOException { + if (credentials == null) { + credentials = GoogleCredentials.getApplicationDefault(); + } + CallCredentials callCredentials = + new InstrumentedCallCredentials(MoreCallCredentials.from(credentials)); + + if (metrics == null) { + metrics = new MetricsImpl(credentials, metricsProjectId); + } + + ResourceCollector resourceCollector = new ResourceCollector(); + refreshExecutor = Executors.newSingleThreadScheduledExecutor(); + + // From + // https://github.com/googleapis/java-bigtable/blob/e0ce2fe3c1207731d15e56faec66ba099652b87c/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java#L406-L410 + ChannelPoolSettings poolSettings = + ChannelPoolSettings.builder() + .setInitialChannelCount(10) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(50) + .setPreemptiveRefreshEnabled(true) + .build(); + if (dataChannel == null) { 
dataChannel = - ManagedChannelBuilder.forAddress(dataEndpoint.getName(), dataEndpoint.getPort()) - .userAgent(userAgent) - .maxInboundMessageSize(256 * 1024 * 1024) - .disableRetry() - .keepAliveTime(30, TimeUnit.SECONDS) - .keepAliveTimeout(10, TimeUnit.SECONDS) - .build(); + ChannelPool.create( + poolSettings, + () -> + new DataChannel( + resourceCollector, + userAgent, + callCredentials, + dataEndpoint.getName(), + dataEndpoint.getPort(), + refreshExecutor, + metrics)); } + if (adminChannel == null) { adminChannel = ManagedChannelBuilder.forAddress(adminEndpoint.getName(), adminEndpoint.getPort()) @@ -110,15 +144,6 @@ void start() throws IOException { .disableRetry() .build(); } - if (credentials == null) { - credentials = GoogleCredentials.getApplicationDefault(); - } - CallCredentials callCredentials = - new InstrumentedCallCredentials(MoreCallCredentials.from(credentials)); - - if (metrics == null) { - metrics = new MetricsImpl(credentials, metricsProjectId); - } Map> serviceMap = ImmutableMap.of( @@ -143,6 +168,7 @@ void start() throws IOException { } void cleanup() throws InterruptedException { + refreshExecutor.shutdown(); dataChannel.shutdown(); adminChannel.shutdown(); } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java index b7a30825d76..a9739fc200a 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java @@ -36,7 +36,6 @@ *

  • {@code x-goog-api-client} - contains the client info of the downstream client
  • * */ - @AutoValue public abstract class CallLabels { private static final Key REQUEST_PARAMS = diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index 23e358289bf..be39e8f6a6e 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -36,4 +36,6 @@ public interface Metrics { void recordGfeHeaderMissing(CallLabels labels); void recordCallLatency(CallLabels labels, Status status, Duration duration); + + void updateChannelCount(int delta); } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index a69db5fd838..c55c5f14b58 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -30,6 +30,7 @@ import io.opentelemetry.api.metrics.DoubleHistogram; import io.opentelemetry.api.metrics.LongCounter; import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.LongUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.sdk.metrics.SdkMeterProvider; import io.opentelemetry.sdk.metrics.export.MetricExporter; @@ -59,6 +60,7 @@ public class MetricsImpl implements Closeable, Metrics { private final LongHistogram requestSizes; private final LongHistogram responseSizes; + private final LongUpDownCounter channelCounter; private final AtomicInteger numOutstandingRpcs = new AtomicInteger(); private final AtomicInteger maxSeen = new AtomicInteger(); @@ -114,8 +116,8 @@ 
public MetricsImpl(Credentials credentials, String projectId) throws IOException .histogramBuilder(METRIC_PREFIX + "client.gfe.duration") .setDescription( "Latency as measured by Google load balancer from the time it " - + "received the first byte of the request until it received the first byte" - + " of the response from the Cloud Bigtable service.") + + "received the first byte of the request until it received the first byte of" + + " the response from the Cloud Bigtable service.") .setUnit("ms") .build(); @@ -133,6 +135,13 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException .setUnit("ms") .build(); + channelCounter = + meter + .upDownCounterBuilder(METRIC_PREFIX + "client.channel.count") + .setDescription("Number of open channels") + .setUnit("{channel}") + .build(); + meter .gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") .setDescription("Number of concurrent") @@ -227,4 +236,9 @@ public void recordCallLatency(CallLabels labels, Status status, Duration duratio clientCallLatencies.record(duration.toMillis(), attributes); numOutstandingRpcs.decrementAndGet(); } + + @Override + public void updateChannelCount(int delta) { + channelCounter.add(delta); + } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index d7085fbe81e..c8a4fe3934f 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -44,4 +44,7 @@ public void recordGfeHeaderMissing(CallLabels labels) {} @Override public void recordCallLatency(CallLabels labels, Status status, Duration duration) {} + + @Override + public void updateChannelCount(int delta) {} } diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index 5db4b684a65..fe3a9c421fd 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -107,4 +107,8 @@ public void onCallFinished(Status status) { public void onCredentialsFetch(Status status, Duration duration) { metrics.recordCredLatency(callLabels, status, duration); } + + public CallLabels getCallLabels() { + return callLabels; + } } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java new file mode 100644 index 00000000000..bc1ecc83acd --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPoolTest.java @@ -0,0 +1,804 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.channelpool; + +import static com.google.common.truth.Truth.assertThat; +import static io.grpc.MethodDescriptor.generateFullMethodName; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.MutateRowRequest; +import com.google.bigtable.v2.MutateRowResponse; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.type.Color; +import com.google.type.Money; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ClientCall.Listener; +import io.grpc.ManagedChannel; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.protobuf.ProtoUtils; +import io.grpc.stub.ClientCalls; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Handler; +import java.util.logging.LogRecord; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + +@RunWith(JUnit4.class) +public class ChannelPoolTest { + private static final int DEFAULT_AWAIT_TERMINATION_SEC = 10; + private ChannelPool pool; + + @After + public void cleanup() throws InterruptedException { + Preconditions.checkNotNull(pool, "Channel pool was 
never created"); + pool.shutdown(); + pool.awaitTermination(DEFAULT_AWAIT_TERMINATION_SEC, TimeUnit.SECONDS); + } + + @Test + public void testAuthority() throws IOException { + ManagedChannel sub1 = Mockito.mock(ManagedChannel.class); + ManagedChannel sub2 = Mockito.mock(ManagedChannel.class); + + Mockito.when(sub1.authority()).thenReturn("myAuth"); + + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(2), + new FakeChannelFactory(Arrays.asList(sub1, sub2))); + assertThat(pool.authority()).isEqualTo("myAuth"); + } + + @Test + public void testRoundRobin() throws IOException { + ManagedChannel sub1 = Mockito.mock(ManagedChannel.class); + ManagedChannel sub2 = Mockito.mock(ManagedChannel.class); + + Mockito.when(sub1.authority()).thenReturn("myAuth"); + + ArrayList channels = Lists.newArrayList(sub1, sub2); + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(channels.size()), new FakeChannelFactory(channels)); + + verifyTargetChannel(pool, channels, sub1); + verifyTargetChannel(pool, channels, sub2); + verifyTargetChannel(pool, channels, sub1); + } + + private void verifyTargetChannel( + ChannelPool pool, List channels, ManagedChannel targetChannel) { + MethodDescriptor methodDescriptor = + BigtableGrpc.getMutateRowMethod(); + CallOptions callOptions = CallOptions.DEFAULT; + @SuppressWarnings("unchecked") + ClientCall expectedClientCall = + Mockito.mock(ClientCall.class); + + channels.forEach(Mockito::reset); + Mockito.doReturn(expectedClientCall).when(targetChannel).newCall(methodDescriptor, callOptions); + + ClientCall actualCall = + pool.newCall(methodDescriptor, callOptions); + Mockito.verify(targetChannel, Mockito.times(1)).newCall(methodDescriptor, callOptions); + actualCall.start(null, null); + Mockito.verify(expectedClientCall, Mockito.times(1)).start(Mockito.any(), Mockito.any()); + + for (ManagedChannel otherChannel : channels) { + if (otherChannel != targetChannel) { + Mockito.verify(otherChannel, 
Mockito.never()).newCall(methodDescriptor, callOptions); + } + } + } + + @Test + public void ensureEvenDistribution() throws InterruptedException, IOException { + int numChannels = 10; + final ManagedChannel[] channels = new ManagedChannel[numChannels]; + final AtomicInteger[] counts = new AtomicInteger[numChannels]; + + MethodDescriptor methodDescriptor = + BigtableGrpc.getMutateRowMethod(); + final CallOptions callOptions = CallOptions.DEFAULT; + @SuppressWarnings("unchecked") + final ClientCall clientCall = + Mockito.mock(ClientCall.class); + + for (int i = 0; i < numChannels; i++) { + final int index = i; + + counts[i] = new AtomicInteger(); + + channels[i] = Mockito.mock(ManagedChannel.class); + Mockito.when(channels[i].newCall(methodDescriptor, callOptions)) + .thenAnswer( + (ignored) -> { + counts[index].incrementAndGet(); + return clientCall; + }); + } + + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(numChannels), + new FakeChannelFactory(Arrays.asList(channels))); + + int numThreads = 20; + final int numPerThread = 1000; + + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + executor.submit( + () -> { + for (int j = 0; j < numPerThread; j++) { + pool.newCall(methodDescriptor, callOptions); + } + }); + } + executor.shutdown(); + boolean shutdown = executor.awaitTermination(1, TimeUnit.MINUTES); + assertThat(shutdown).isTrue(); + + int expectedCount = (numThreads * numPerThread) / numChannels; + for (AtomicInteger count : counts) { + assertThat(count.get()).isAnyOf(expectedCount, expectedCount + 1); + } + } + + // Test channelPrimer is called same number of times as poolSize if executorService is set to null + @Test + public void channelPrimerShouldCallPoolConstruction() throws IOException { + ChannelPrimer mockChannelPrimer = Mockito.mock(ChannelPrimer.class); + ManagedChannel channel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel2 = 
Mockito.mock(ManagedChannel.class); + + pool = + ChannelPool.create( + ChannelPoolSettings.staticallySized(2).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + new FakeChannelFactory(Arrays.asList(channel1, channel2), mockChannelPrimer)); + Mockito.verify(mockChannelPrimer, Mockito.times(2)) + .primeChannel(Mockito.any(ManagedChannel.class)); + } + + // Test channelPrimer is called periodically, if there's an executorService + @Test + public void channelPrimerIsCalledPeriodically() throws IOException { + ChannelPrimer mockChannelPrimer = Mockito.mock(ChannelPrimer.class); + ManagedChannel channel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel2 = Mockito.mock(ManagedChannel.class); + ManagedChannel channel3 = Mockito.mock(ManagedChannel.class); + + List channelRefreshers = new ArrayList<>(); + + ScheduledExecutorService scheduledExecutorService = + Mockito.mock(ScheduledExecutorService.class); + + Answer extractChannelRefresher = + invocation -> { + channelRefreshers.add(invocation.getArgument(0)); + return Mockito.mock(ScheduledFuture.class); + }; + + Mockito.doAnswer(extractChannelRefresher) + .when(scheduledExecutorService) + .scheduleAtFixedRate( + Mockito.any(Runnable.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.any()); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(Arrays.asList(channel1, channel2, channel3), mockChannelPrimer); + + pool = + new ChannelPool( + ChannelPoolSettings.staticallySized(1).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + channelFactory, + scheduledExecutorService); + // 1 call during the creation + Mockito.verify(mockChannelPrimer, Mockito.times(1)) + .primeChannel(Mockito.any(ManagedChannel.class)); + + channelRefreshers.get(0).run(); + // 1 more call during channel refresh + Mockito.verify(mockChannelPrimer, Mockito.times(2)) + .primeChannel(Mockito.any(ManagedChannel.class)); + + channelRefreshers.get(0).run(); + // 1 more call during channel refresh + 
Mockito.verify(mockChannelPrimer, Mockito.times(3)) + .primeChannel(Mockito.any(ManagedChannel.class)); + } + + // ---- + // call should be allowed to complete and the channel should not be shutdown + @Test + public void callShouldCompleteAfterCreation() throws IOException { + ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on entry + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + pool.refresh(); + // shutdown is not called because there is still an outstanding call, even if it hasn't started + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + + // start clientCall + call.start(listener, new Metadata()); + // send message and end the call + call.sendMessage("message"); + // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, 
Mockito.atLeastOnce()).shutdown(); + + // Replacement channel shouldn't be touched + Mockito.verify(replacementChannel, Mockito.never()).shutdown(); + Mockito.verify(replacementChannel, Mockito.never()).newCall(Mockito.any(), Mockito.any()); + } + + // call should be allowed to complete and the channel should not be shutdown + @Test + public void callShouldCompleteAfterStarted() throws IOException { + final ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on safeShutdownManagedChannel + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + // start clientCall + call.start(listener, new Metadata()); + pool.refresh(); + + // shutdown is not called because there is still an outstanding call + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + // send message and end the call + call.sendMessage("message"); 
+ // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, Mockito.atLeastOnce()).shutdown(); + } + + // Channel should be shutdown after a refresh all the calls have completed + @Test + public void channelShouldShutdown() throws IOException { + ManagedChannel underlyingChannel = Mockito.mock(ManagedChannel.class); + ManagedChannel replacementChannel = Mockito.mock(ManagedChannel.class); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel, replacementChannel)); + pool = ChannelPool.create(ChannelPoolSettings.staticallySized(1), channelFactory); + + // create a mock call when new call comes to the underlying channel + MockClientCall mockClientCall = new MockClientCall<>(1, Status.OK); + MockClientCall spyClientCall = Mockito.spy(mockClientCall); + Mockito.when( + underlyingChannel.newCall( + Mockito.>any(), Mockito.any(CallOptions.class))) + .thenReturn(spyClientCall); + + Answer verifyChannelNotShutdown = + invocation -> { + Mockito.verify(underlyingChannel, Mockito.never()).shutdown(); + return invocation.callRealMethod(); + }; + + // verify that underlying channel is not shutdown when clientCall is still sending message + Mockito.doAnswer(verifyChannelNotShutdown).when(spyClientCall).sendMessage(Mockito.anyString()); + + // create a new call on safeShutdownManagedChannel + @SuppressWarnings("unchecked") + ClientCall.Listener listener = Mockito.mock(ClientCall.Listener.class); + ClientCall call = + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + // start clientCall + call.start(listener, new Metadata()); + // send message and end the call + call.sendMessage("message"); + // shutdown is not called because it has not been shutdown yet + Mockito.verify(underlyingChannel, Mockito.after(200).never()).shutdown(); + pool.refresh(); + // shutdown is called because the outstanding call has completed + Mockito.verify(underlyingChannel, 
Mockito.atLeastOnce()).shutdown(); + } + + @Test + public void channelRefreshShouldSwapChannels() throws IOException { + ManagedChannel underlyingChannel1 = Mockito.mock(ManagedChannel.class); + ManagedChannel underlyingChannel2 = Mockito.mock(ManagedChannel.class); + + // mock executor service to capture the runnable scheduled, so we can invoke it when we want to + ScheduledExecutorService scheduledExecutorService = + Mockito.mock(ScheduledExecutorService.class); + + Mockito.doReturn(null) + .when(scheduledExecutorService) + .schedule( + Mockito.any(Runnable.class), Mockito.anyLong(), Mockito.eq(TimeUnit.MILLISECONDS)); + + FakeChannelFactory channelFactory = + new FakeChannelFactory(ImmutableList.of(underlyingChannel1, underlyingChannel2)); + pool = + new ChannelPool( + ChannelPoolSettings.staticallySized(1).toBuilder() + .setPreemptiveRefreshEnabled(true) + .build(), + channelFactory, + scheduledExecutorService); + Mockito.reset(underlyingChannel1); + + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + Mockito.verify(underlyingChannel1, Mockito.only()) + .newCall(Mockito.>any(), Mockito.any(CallOptions.class)); + + // swap channel + pool.refresh(); + + pool.newCall(FakeMethodDescriptor.create(), CallOptions.DEFAULT); + + Mockito.verify(underlyingChannel2, Mockito.only()) + .newCall(Mockito.>any(), Mockito.any(CallOptions.class)); + } + + @Test + public void channelCountShouldNotChangeWhenOutstandingRpcsAreWithinLimits() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List> startedCalls = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + startedCalls.add(clientCall); + return clientCall; + }); + return channel; + }; + + pool 
= + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // Start the minimum number of + for (int i = 0; i < 2; i++) { + ClientCalls.futureUnaryCall( + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + pool.resize(); + assertThat(pool.entries.get()).hasSize(2); + + // Add enough RPCs to be just at the brink of expansion + for (int i = startedCalls.size(); i < 4; i++) { + ClientCalls.futureUnaryCall( + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + pool.resize(); + assertThat(pool.entries.get()).hasSize(2); + + // Add another RPC to push expansion + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT); + pool.resize(); + assertThat(pool.entries.get()).hasSize(4); // += ChannelPool::MAX_RESIZE_DELTA + assertThat(startedCalls).hasSize(5); + + // Complete RPCs to the brink of shrinking + @SuppressWarnings("unchecked") + ArgumentCaptor> captor = + ArgumentCaptor.forClass(ClientCall.Listener.class); + Mockito.verify(startedCalls.remove(0)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + // Resize twice: the first round maintains the peak from the last cycle + pool.resize(); + pool.resize(); + assertThat(pool.entries.get()).hasSize(4); + assertThat(startedCalls).hasSize(4); + + // Complete another RPC to trigger shrinking + Mockito.verify(startedCalls.remove(0)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + // Resize twice: the first round maintains the peak from the last cycle + pool.resize(); + pool.resize(); + assertThat(startedCalls).hasSize(3); + // range of channels is [2-3] rounded down average is 2 + 
assertThat(pool.entries.get()).hasSize(2); + } + + @Test + public void removedIdleChannelsAreShutdown() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List channels = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + return clientCall; + }); + + channels.add(channel); + return channel; + }; + + pool = + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // With no outstanding RPCs, the pool should shrink + pool.resize(); + assertThat(pool.entries.get()).hasSize(1); + Mockito.verify(channels.get(1), Mockito.times(1)).shutdown(); + } + + @Test + public void removedActiveChannelsAreShutdown() throws Exception { + ScheduledExecutorService executor = Mockito.mock(ScheduledExecutorService.class); + + List channels = new ArrayList<>(); + List> startedCalls = new ArrayList<>(); + + ChannelFactory channelFactory = + () -> { + ManagedChannel channel = Mockito.mock(ManagedChannel.class); + Mockito.when(channel.newCall(Mockito.any(), Mockito.any())) + .thenAnswer( + invocation -> { + @SuppressWarnings("unchecked") + ClientCall clientCall = Mockito.mock(ClientCall.class); + startedCalls.add(clientCall); + return clientCall; + }); + + channels.add(channel); + return channel; + }; + + pool = + new ChannelPool( + ChannelPoolSettings.builder() + .setInitialChannelCount(2) + .setMinRpcsPerChannel(1) + .setMaxRpcsPerChannel(2) + .build(), + channelFactory, + executor); + assertThat(pool.entries.get()).hasSize(2); + + // Start 2 RPCs + for (int i = 0; i < 2; i++) { + ClientCalls.futureUnaryCall( 
+ pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT), + MutateRowRequest.getDefaultInstance()); + } + // Complete the first one + @SuppressWarnings("unchecked") + ArgumentCaptor> captor = + ArgumentCaptor.forClass(ClientCall.Listener.class); + Mockito.verify(startedCalls.get(0)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + + // With a single RPC, the pool should shrink + pool.resize(); + pool.resize(); + assertThat(pool.entries.get()).hasSize(1); + + // While the RPC is outstanding, the channel should still be open + Mockito.verify(channels.get(1), Mockito.never()).shutdown(); + + // Complete the RPC + Mockito.verify(startedCalls.get(1)).start(captor.capture(), Mockito.any()); + captor.getValue().onClose(Status.ABORTED, new Metadata()); + // Now the channel should be closed + Mockito.verify(channels.get(1), Mockito.times(1)).shutdown(); + } + + @Test + public void testReleasingClientCallCancelEarly() throws IOException { + @SuppressWarnings("unchecked") + ClientCall mockClientCall = Mockito.mock(ClientCall.class); + Mockito.doAnswer(invocation -> null).when(mockClientCall).cancel(Mockito.any(), Mockito.any()); + ManagedChannel fakeChannel = Mockito.mock(ManagedChannel.class); + Mockito.when(fakeChannel.newCall(Mockito.any(), Mockito.any())).thenReturn(mockClientCall); + ChannelPoolSettings channelPoolSettings = ChannelPoolSettings.staticallySized(1); + ChannelFactory factory = new FakeChannelFactory(ImmutableList.of(fakeChannel)); + pool = ChannelPool.create(channelPoolSettings, factory); + + ClientCall call = + pool.newCall(BigtableGrpc.getMutateRowMethod(), CallOptions.DEFAULT); + call.cancel(null, null); + + IllegalStateException e = + Assert.assertThrows( + IllegalStateException.class, () -> call.start(new Listener<>() {}, new Metadata())); + assertThat(e.getCause()).isInstanceOf(CancellationException.class); + assertThat(e.getMessage()).isEqualTo("Call is already cancelled"); + } + + 
@Test + public void testDoubleRelease() throws Exception { + FakeLogHandler logHandler = new FakeLogHandler(); + ChannelPool.LOG.addHandler(logHandler); + + try { + // Create a fake channel pool thats backed by mock channels that simply record invocations + @SuppressWarnings("unchecked") + ClientCall mockClientCall = + Mockito.mock(ClientCall.class); + ManagedChannel fakeChannel = Mockito.mock(ManagedChannel.class); + Mockito.when( + fakeChannel.newCall( + Mockito.eq(BigtableGrpc.getMutateRowMethod()), Mockito.any(CallOptions.class))) + .thenReturn(mockClientCall); + ChannelPoolSettings channelPoolSettings = ChannelPoolSettings.staticallySized(1); + ChannelFactory factory = new FakeChannelFactory(ImmutableList.of(fakeChannel)); + + pool = ChannelPool.create(channelPoolSettings, factory); + + // Start the RPC + ListenableFuture rpcFuture = + BigtableGrpc.newFutureStub(pool).mutateRow(MutateRowRequest.getDefaultInstance()); + + // Get the server side listener and intentionally close it twice + @SuppressWarnings("unchecked") + ArgumentCaptor> clientCallListenerCaptor = + ArgumentCaptor.forClass(ClientCall.Listener.class); + + Mockito.verify(mockClientCall).start(clientCallListenerCaptor.capture(), Mockito.any()); + clientCallListenerCaptor.getValue().onClose(Status.INTERNAL, new Metadata()); + clientCallListenerCaptor.getValue().onClose(Status.UNKNOWN, new Metadata()); + + // Ensure that the channel pool properly logged the double call and kept the refCount correct + assertThat(logHandler.getAllMessages()) + .contains( + "Call is being closed more than once. 
Please make sure that onClose() is not being" + + " manually called."); + assertThat(pool.entries.get()).hasSize(1); + ChannelPool.Entry entry = pool.entries.get().get(0); + assertThat(entry.outstandingRpcs.get()).isEqualTo(0); + } finally { + ChannelPool.LOG.removeHandler(logHandler); + } + } + + static class FakeChannelFactory implements ChannelFactory { + private int called = 0; + private final List channels; + private ChannelPrimer channelPrimer; + + public FakeChannelFactory(List channels) { + this.channels = channels; + } + + public FakeChannelFactory(List channels, ChannelPrimer channelPrimer) { + this.channels = channels; + this.channelPrimer = channelPrimer; + } + + public ManagedChannel createSingleChannel() { + ManagedChannel managedChannel = channels.get(called++); + if (this.channelPrimer != null) { + this.channelPrimer.primeChannel(managedChannel); + } + return managedChannel; + } + } + + static class FakeLogHandler extends Handler { + List records = new ArrayList<>(); + + @Override + public void publish(LogRecord record) { + records.add(record); + } + + @Override + public void flush() {} + + @Override + public void close() throws SecurityException {} + + public List getAllMessages() { + return records.stream().map(LogRecord::getMessage).collect(Collectors.toList()); + } + } + + public interface ChannelPrimer { + void primeChannel(ManagedChannel managedChannel); + } + + static class MockClientCall extends ClientCall { + + private final ResponseT response; + private Listener responseListener; + private Metadata headers; + private final Status status; + + public MockClientCall(ResponseT response, Status status) { + this.response = response; + this.status = status; + } + + @Override + public synchronized void start(Listener responseListener, Metadata headers) { + this.responseListener = responseListener; + this.headers = headers; + } + + @Override + public void request(int numMessages) {} + + @Override + public void cancel(@Nullable String message, 
@Nullable Throwable cause) {} + + @Override + public void halfClose() {} + + @Override + public void sendMessage(RequestT message) { + responseListener.onHeaders(headers); + responseListener.onMessage(response); + responseListener.onClose(status, headers); + } + } + + static class FakeMethodDescriptor { + // Utility class, uninstantiable. + private FakeMethodDescriptor() {} + + public static MethodDescriptor create() { + return create(MethodDescriptor.MethodType.UNARY, "FakeClient/fake-method"); + } + + public static MethodDescriptor create( + MethodDescriptor.MethodType type, String name) { + return MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName(name) + .setRequestMarshaller(new FakeMarshaller()) + .setResponseMarshaller(new FakeMarshaller()) + .build(); + } + + private static class FakeMarshaller implements MethodDescriptor.Marshaller { + @Override + public T parse(InputStream stream) { + throw new UnsupportedOperationException("FakeMarshaller doesn't actually do anything"); + } + + @Override + public InputStream stream(T value) { + throw new UnsupportedOperationException("FakeMarshaller doesn't actually do anything"); + } + } + } + + static final MethodDescriptor METHOD_RECOGNIZE = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName("google.gax.FakeService", "Recognize")) + .setRequestMarshaller(ProtoUtils.marshaller(Color.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Money.getDefaultInstance())) + .build(); + + public static final MethodDescriptor METHOD_SERVER_STREAMING_RECOGNIZE = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName( + generateFullMethodName("google.gax.FakeService", "ServerStreamingRecognize")) + .setRequestMarshaller(ProtoUtils.marshaller(Color.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(Money.getDefaultInstance())) + 
.build(); +} From 164ddd47d46c0ac891ca8a7b6979d165d80e5d50 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 22 Nov 2024 17:52:18 -0500 Subject: [PATCH 23/66] docs: packaging & docs (#9695) stack-info: PR: https://github.com/GoogleCloudPlatform/java-docs-samples/pull/9695, branch: igorbernstein2/stack/6 --- bigtable/bigtable-proxy/README.md | 88 +++++++++++++++++++ bigtable/bigtable-proxy/pom.xml | 39 ++++++++ .../src/main/assembly/assembly.xml | 52 +++++++++++ .../cloud/bigtable/examples/proxy/Main.java | 2 +- .../examples/proxy/commands/Serve.java | 2 +- .../examples/proxy/metrics/MetricsImpl.java | 4 +- .../src/main/scripts/bigtable-proxy.sh | 16 ++++ 7 files changed, 199 insertions(+), 4 deletions(-) create mode 100644 bigtable/bigtable-proxy/README.md create mode 100644 bigtable/bigtable-proxy/src/main/assembly/assembly.xml create mode 100755 bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md new file mode 100644 index 00000000000..b4dcb2705d9 --- /dev/null +++ b/bigtable/bigtable-proxy/README.md @@ -0,0 +1,88 @@ +# Bigtable proxy + +## Overview + +A simple server meant to be used as a sidecar to maintain a persistent connection to Bigtable and +collect metrics. The primary purpose is to support applications that can't maintain a long-lived +gRPC connection (i.e. PHP in Apache). + +The proxy is intended to be used as a local sidecar process. The proxy is intended to be shared by +all processes on the VM that it is running on. Its listening address is hardcoded to `localhost`. +The proxy will use [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) +for all outbound RPCs.
+ +The proxy will accept local unencrypted connections from Bigtable clients, and: +- attach credentials +- export metrics +- send the RPC over an encrypted channel pool to Bigtable service + +## Features + +* Metrics - The proxy will track RPC metrics and export them to Google Cloud Monitoring +* Multi-tenant - The proxy can be used to connect to many different Bigtable instances +* Credential handling - The proxy has its own set of credentials. It will ignore any inbound + credentials from the client +* Channel pooling - The proxy will maintain and autosize the outbound channel pool to properly + load balance RPCs. + +## Metrics + +The proxy is instrumented with OpenTelemetry and will export those metrics to Google Cloud Monitoring +in a project of your choosing. The metrics will be published under the namespace +`workload.googleapis.com`. Available metrics: + +* `bigtableproxy.server.call.started` The total number of RPCs started, including those that have + not completed. +* `bigtableproxy.client.call.credential.duration` Latency of getting credentials +* `bigtableproxy.client.call.queue.duration` Duration of how long the outbound side of the proxy had + the RPC queued +* `bigtableproxy.client.call.sent_total_message_size` Total bytes sent per call to Bigtable service + (excluding metadata, grpc and transport framing bytes) +* `bigtableproxy.client.call.rcvd_total_message_size` Total bytes received per call from Bigtable + service (excluding metadata, grpc and transport framing bytes) +* `bigtableproxy.client.gfe.duration` Latency as measured by Google load balancer from the time it + received the first byte of the request until it received the first byte of the response from the + Cloud Bigtable service.
+* `bigtableproxy.client.gfe.duration_missing.count` Count of calls missing gfe response headers +* `bigtableproxy.client.call.duration` Total duration of how long the outbound call took +* `bigtableproxy.client.channel.count` Number of open channels +* `bigtableproxy.client.call.max_outstanding_count` Maximum number of concurrent RPCs in a single + minute window + +## Requirements + +* JVM >= 11 +* Ensure that the service account includes the IAM roles: + * `Monitoring Metric Writer` + * `Bigtable User` +* Ensure that the metrics project has `Stackdriver Monitoring API` enabled + +## Expected usage + +```sh +# Build the binary +mvn package + +# use the binary +unzip target/bigtable-proxy-0.0.1-SNAPSHOT-bin.zip +cd bigtable-proxy-0.0.1-SNAPSHOT +./bigtable-proxy.sh \ + --listen-port=1234 \ + --metrics-project-id=SOME_GCP_PROJECT + +export BIGTABLE_EMULATOR_HOST=1234 +path/to/application/with/bigtable/client +``` + +## Configuration + +Required options: +* `--listen-port=<port>` The local port to listen for Bigtable client connections. This needs to + match the port in the `BIGTABLE_EMULATOR_HOST="localhost:<port>"` environment variable passed to your + application. +* `--metrics-project-id=<project-id>` The Google Cloud project that should be used to collect metrics + emitted from the proxy. + +Optional configuration: +* The environment variable `GOOGLE_APPLICATION_CREDENTIALS` can be used to use a non-default service + account.
More details can be found here: https://cloud.google.com/docs/authentication/application-default-credentials diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml index db972f2c19d..07666449a73 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -217,6 +217,45 @@ maven-surefire-plugin 3.5.2 + + + org.apache.maven.plugins + maven-jar-plugin + 3.4.2 + + + + true + + lib/ + com.google.cloud.bigtable.examples.proxy.Main + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 3.7.1 + + + + + src/main/assembly/assembly.xml + + + + + + assemble + + single + + package + + + diff --git a/bigtable/bigtable-proxy/src/main/assembly/assembly.xml b/bigtable/bigtable-proxy/src/main/assembly/assembly.xml new file mode 100644 index 00000000000..47126e8861f --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/assembly/assembly.xml @@ -0,0 +1,52 @@ + + bin + + + zip + + + + + + + false + lib + false + + + + + + + ${project.basedir} + + + README* + LICENSE* + NOTICE* + + + + + + ${project.build.scriptSourceDirectory} + + + *.sh + + true + + + + + + ${project.build.directory} + + + *.jar + + + + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java index 54d48f334d1..257f9bde606 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -27,7 +27,7 @@ * Main entry point for proxy commands under {@link * com.google.cloud.bigtable.examples.proxy.commands}. 
*/ -@Command(subcommands = {Serve.class}) +@Command(subcommands = {Serve.class}, name = "bigtable-proxy") public final class Main { private static final Logger LOGGER = LoggerFactory.getLogger(Main.class); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index a3ed0c9a50f..ba980e976ed 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -52,7 +52,7 @@ import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; -@Command(name = "serve", mixinStandardHelpOptions = true, description = "Start the proxy server") +@Command(name = "serve", description = "Start the proxy server") public class Serve implements Callable { private static final Logger LOGGER = LoggerFactory.getLogger(Serve.class); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index c55c5f14b58..3bf09688168 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -78,7 +78,7 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException clientCredLatencies = meter - .histogramBuilder(METRIC_PREFIX + "client.call.credential.refresh.duration") + .histogramBuilder(METRIC_PREFIX + "client.call.credential.duration") .setDescription("Latency of getting credentials") .setUnit("ms") .build(); @@ -144,7 +144,7 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException meter 
.gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") - .setDescription("Number of concurrent") + .setDescription("Maximum number of concurrent RPCs in a single minute window") .setUnit("{call}") .ofLongs() .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); diff --git a/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh b/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh new file mode 100755 index 00000000000..58b35e9c0a9 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/scripts/bigtable-proxy.sh @@ -0,0 +1,16 @@ +#!/bin/sh + # Copyright 2024 Google LLC + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +java -jar ${project.build.finalName}.jar serve "$@" From 10e9a3c2ead17833a553113bdf76eb9dd5ca764f Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 25 Nov 2024 10:26:26 -0500 Subject: [PATCH 24/66] feat: add presence metric for bigtable-proxy (#9696) --- bigtable/bigtable-proxy/README.md | 1 + .../cloud/bigtable/examples/proxy/metrics/MetricsImpl.java | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md index b4dcb2705d9..2e7b908f9c0 100644 --- a/bigtable/bigtable-proxy/README.md +++ b/bigtable/bigtable-proxy/README.md @@ -48,6 +48,7 @@ in a project your choosing. 
The metrics will be published under the namespace * `bigtableproxy.client.channel.count` Number of open channels * `bigtableproxy.client.call.max_outstanding_count` Maximum number of concurrent RPCs in a single minute window +* `bigtableproxy.presence` Counts number of proxy processes (emit 1 per process). ## Requirements diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 3bf09688168..27200692472 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -148,6 +148,13 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException .setUnit("{call}") .ofLongs() .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); + + meter + .gaugeBuilder(METRIC_PREFIX + ".presence") + .setDescription("Number of proxy processes") + .setUnit("{process}") + .ofLongs() + .buildWithCallback(o -> o.record(1)); } @Override From 9ec2f116317c28b1bf0ffead49c6509d9b25e620 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 25 Nov 2024 11:42:13 -0500 Subject: [PATCH 25/66] Verify (#9732) * feat: add automatic resource mapping for metrics * feat: add a verification subcommand --- bigtable/bigtable-proxy/pom.xml | 21 +- .../cloud/bigtable/examples/proxy/Main.java | 7 +- .../examples/proxy/commands/Verify.java | 251 ++++++++++++++++++ .../examples/proxy/metrics/MetricsImpl.java | 35 +-- .../src/main/scripts/bigtable-verify.sh | 16 ++ 5 files changed, 301 insertions(+), 29 deletions(-) create mode 100644 bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java create mode 100755 bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh diff --git a/bigtable/bigtable-proxy/pom.xml 
b/bigtable/bigtable-proxy/pom.xml index 07666449a73..345ef10de16 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -21,14 +21,19 @@ UTF-8 + 26.50.0 - + 1.44.1 + 1.41.0-alpha + 0.33.0 0.33.0 + 2.0.16 1.5.12 1.11.0 4.7.6 + 4.13.2 1.4.4 @@ -125,6 +130,20 @@ exporter-metrics ${exporter-metrics.version} + + io.opentelemetry.contrib + opentelemetry-gcp-resources + ${otel-contrib.version} + + + io.opentelemetry + opentelemetry-sdk-extension-autoconfigure-spi + + + com.google.cloud.opentelemetry + shared-resourcemapping + ${shared-resourcemapping.version} + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java index 257f9bde606..37a2ce7f692 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy; import com.google.cloud.bigtable.examples.proxy.commands.Serve; +import com.google.cloud.bigtable.examples.proxy.commands.Verify; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.bridge.SLF4JBridgeHandler; @@ -27,10 +28,10 @@ * Main entry point for proxy commands under {@link * com.google.cloud.bigtable.examples.proxy.commands}. 
*/ -@Command(subcommands = {Serve.class}, name = "bigtable-proxy") +@Command( + subcommands = {Serve.class, Verify.class}, + name = "bigtable-proxy") public final class Main { - private static final Logger LOGGER = LoggerFactory.getLogger(Main.class); - public static void main(String[] args) { SLF4JBridgeHandler.install(); new CommandLine(new Main()).execute(args); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java new file mode 100644 index 00000000000..58f35539741 --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -0,0 +1,251 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.commands; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.bigtable.v2.BigtableGrpc; +import com.google.bigtable.v2.BigtableGrpc.BigtableBlockingStub; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.CheckAndMutateRowResponse; +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.Mutation.DeleteFromRow; +import com.google.bigtable.v2.ReadRowsRequest; +import com.google.bigtable.v2.ReadRowsResponse; +import com.google.bigtable.v2.RowFilter; +import com.google.bigtable.v2.RowFilter.Chain; +import com.google.bigtable.v2.RowSet; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.cloud.opentelemetry.resource.GcpResource; +import com.google.cloud.opentelemetry.resource.ResourceTranslator; +import com.google.common.collect.ImmutableList; +import com.google.common.net.PercentEscaper; +import com.google.protobuf.ByteString; +import io.grpc.CallCredentials; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.Deadline; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.Metadata.Key; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import io.grpc.auth.MoreCallCredentials; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import 
io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.resources.Resource; +import java.io.IOException; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Iterator; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import picocli.CommandLine.Command; +import picocli.CommandLine.Help.Visibility; +import picocli.CommandLine.Option; + +@Command(name = "verify", description = "Verify environment is properly set up") +public class Verify implements Callable { + @Option( + names = "--bigtable-project-id", + required = true, + description = "Project that contains a Bigtable instance to use for connectivity test") + String bigtableProjectId; + + @Option( + names = "--bigtable-instance-id", + required = true, + description = "Bigtable instance to use for connectivity test") + String bigtableInstanceId; + + @Option( + names = "--bigtable-table-id", + required = true, + description = "Bigtable table to use for connectivity test") + String bigtableTableId; + + @Option( + names = "--metrics-project-id", + required = true, + description = "The project id where metrics should be exported") + String metricsProjectId = null; + + @Option( + names = "--bigtable-data-endpoint", + converter = Endpoint.ArgConverter.class, + showDefaultValue = Visibility.ALWAYS) + Endpoint dataEndpoint = Endpoint.create("bigtable.googleapis.com", 443); + + + Credentials credentials = null; + + @Override + public Void call() throws Exception { + if (credentials == null) { + credentials = GoogleCredentials.getApplicationDefault(); + } + checkBigtable( + MoreCallCredentials.from(credentials), + String.format( + "projects/%s/instances/%s/tables/%s", + bigtableProjectId, bigtableInstanceId, bigtableTableId)); + + checkMetrics(credentials); + return null; + } + + 
private void checkBigtable(CallCredentials callCredentials, String tableName) { + ManagedChannel channel = + ManagedChannelBuilder.forAddress(dataEndpoint.getName(), dataEndpoint.getPort()).build(); + + try { + Metadata md = new Metadata(); + PercentEscaper escaper = new PercentEscaper("", true); + md.put( + Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), + String.format("table_name=%s&app_profile_id=%s", escaper.escape(tableName), "")); + + BigtableBlockingStub stub = + BigtableGrpc.newBlockingStub(channel) + .withCallCredentials(callCredentials) + .withInterceptors(new MetadataInterceptor(md)); + + ReadRowsRequest readRequest = + ReadRowsRequest.newBuilder() + .setTableName( + String.format( + "projects/%s/instances/%s/tables/%s", + bigtableProjectId, bigtableTableId, bigtableTableId)) + .setRowsLimit(1) + .setRows( + RowSet.newBuilder().addRowKeys(ByteString.copyFromUtf8("some-nonexistent-row"))) + .setFilter( + RowFilter.newBuilder() + .setChain( + Chain.newBuilder() + .addFilters(RowFilter.newBuilder().setCellsPerRowLimitFilter(1)) + .addFilters( + RowFilter.newBuilder().setStripValueTransformer(true).build()))) + .build(); + + Iterator readIt = + stub.withDeadline(Deadline.after(1, TimeUnit.SECONDS)).readRows(readRequest); + + try { + while (readIt.hasNext()) { + readIt.next(); + } + System.out.println("Bigtable Read: OK"); + } catch (StatusRuntimeException e) { + System.out.println("Bigtable Read: Failed - " + e.getStatus()); + return; + } + + CheckAndMutateRowRequest rwReq = + CheckAndMutateRowRequest.newBuilder() + .setTableName(tableName) + .setRowKey(ByteString.copyFromUtf8("some-non-existent-row")) + .setPredicateFilter(RowFilter.newBuilder().setBlockAllFilter(true)) + .addTrueMutations( + Mutation.newBuilder().setDeleteFromRow(DeleteFromRow.getDefaultInstance())) + .build(); + + try { + CheckAndMutateRowResponse ignored = stub.checkAndMutateRow(rwReq); + System.out.println("Bigtable Read/Write: OK"); + } catch (StatusRuntimeException 
e) { + System.out.println("Bigtable Read/Write: Failed - " + e.getStatus()); + return; + } + } finally { + channel.shutdown(); + } + } + + void checkMetrics(Credentials creds) throws IOException { + Instant now = Instant.now().truncatedTo(ChronoUnit.MINUTES); + Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); + + GCPResourceProvider resourceProvider = new GCPResourceProvider(); + Resource resource = Resource.create(resourceProvider.getAttributes()); + GcpResource gcpResource = ResourceTranslator.mapResource(resource); + + MetricExporter exporter = + GoogleCloudMetricExporter.createWithConfiguration( + MetricConfiguration.builder() + .setCredentials(creds) + .setProjectId(metricsProjectId) + .setInstrumentationLibraryLabelsEnabled(false) + .build()); + + ImmutableList metricData = + ImmutableList.of( + ImmutableMetricData.createLongGauge( + resource, + InstrumentationScopeInfo.create("bigtable-proxy"), + "bigtableproxy.presence", + "Number of proxy processes", + "{process}", + ImmutableGaugeData.create( + ImmutableList.of( + ImmutableLongPointData.create( + TimeUnit.MILLISECONDS.toNanos(now.toEpochMilli()), + TimeUnit.MILLISECONDS.toNanos(end.toEpochMilli()), + Attributes.empty(), + 1L))))); + CompletableResultCode result = exporter.export(metricData); + result.join(1, TimeUnit.MINUTES); + + if (result.isSuccess()) { + System.out.println("Metrics write: OK"); + } else { + System.out.println("Metrics write: FAILED: " + result.getFailureThrowable().getMessage()); + } + } + + private static class MetadataInterceptor implements ClientInterceptor { + private final Metadata metadata; + + private MetadataInterceptor(Metadata metadata) { + this.metadata = metadata; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall<>(next.newCall(method, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + 
headers.merge(metadata); + super.start(responseListener, headers); + } + }; + } + } +} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 27200692472..9c5f3f3795b 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -16,14 +16,9 @@ package com.google.cloud.bigtable.examples.proxy.metrics; -import com.google.api.gax.core.FixedCredentialsProvider; -import com.google.api.gax.grpc.GrpcTransportChannel; -import com.google.api.gax.rpc.FixedTransportChannelProvider; import com.google.auth.Credentials; -import com.google.cloud.monitoring.v3.MetricServiceSettings; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; -import io.grpc.ManagedChannelBuilder; import io.grpc.Status; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -32,16 +27,19 @@ import io.opentelemetry.api.metrics.LongHistogram; import io.opentelemetry.api.metrics.LongUpDownCounter; import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; import io.opentelemetry.sdk.metrics.SdkMeterProvider; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.resources.Resource; import java.io.Closeable; import java.io.IOException; import java.time.Duration; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; public class MetricsImpl implements Closeable, Metrics { - private static final String METRIC_PREFIX = "bigtableproxy."; + public static final String 
METRIC_PREFIX = "bigtableproxy."; static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("apiclient"); static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); @@ -64,6 +62,9 @@ public class MetricsImpl implements Closeable, Metrics { private final AtomicInteger numOutstandingRpcs = new AtomicInteger(); private final AtomicInteger maxSeen = new AtomicInteger(); + static Supplier gcpResourceSupplier = + () -> Resource.create(new GCPResourceProvider().getAttributes()); + public MetricsImpl(Credentials credentials, String projectId) throws IOException { meterProvider = createMeterProvider(credentials, projectId); Meter meter = meterProvider.meterBuilder("bigtableproxy").build(); @@ -162,34 +163,18 @@ public void close() { meterProvider.close(); } - private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) - throws IOException { - MetricServiceSettings.Builder metricServiceSettingsBuilder = MetricServiceSettings.newBuilder(); - metricServiceSettingsBuilder - .setCredentialsProvider(FixedCredentialsProvider.create(credentials)) - .setTransportChannelProvider( - FixedTransportChannelProvider.create( - GrpcTransportChannel.create( - ManagedChannelBuilder.forTarget( - MetricConfiguration.DEFAULT_METRIC_SERVICE_ENDPOINT) - // default 8 KiB - .maxInboundMetadataSize(16 * 1000) - .build()))) - .createMetricDescriptorSettings() - .setSimpleTimeoutNoRetriesDuration( - Duration.ofMillis(MetricConfiguration.DEFAULT_DEADLINE.toMillis())) - .build(); - + private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) { MetricConfiguration config = MetricConfiguration.builder() .setProjectId(projectId) - .setMetricServiceSettings(metricServiceSettingsBuilder.build()) + .setCredentials(credentials) .setInstrumentationLibraryLabelsEnabled(false) .build(); MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config); return SdkMeterProvider.builder() + 
.setResource(gcpResourceSupplier.get()) .registerMetricReader( PeriodicMetricReader.builder(exporter).setInterval(Duration.ofMinutes(1)).build()) .build(); diff --git a/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh b/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh new file mode 100755 index 00000000000..380cb84100b --- /dev/null +++ b/bigtable/bigtable-proxy/src/main/scripts/bigtable-verify.sh @@ -0,0 +1,16 @@ +#!/bin/sh + # Copyright 2024 Google LLC + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ +java -jar ${project.build.finalName}.jar verify "$@" From 3607955e87bd061d43386f8e6dc55c703519dbf7 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 26 Nov 2024 11:15:22 -0500 Subject: [PATCH 26/66] fix: misc fixes for bigtable-proxy (#9736) * fix: misc fixes for bigtable-proxy * fix dependency conflict that shared-configuration created for the transitive dep google-cloud-core * some docs * typo in Verify & extract constants * hard fail if call options credentials receives are missing a tracer * rename the label apiclient -> api_client for readability * remove stray imports * format * presence typo * fix ping and warm * update readme for verify * add multiple response detection to ping and warm * fix comment * add a TODO for a debug metric --- bigtable/bigtable-proxy/README.md | 17 +++- bigtable/bigtable-proxy/pom.xml | 5 ++ .../cloud/bigtable/examples/proxy/Main.java | 2 - .../proxy/channelpool/DataChannel.java | 82 ++++++++++++++++--- .../examples/proxy/commands/Serve.java | 3 + .../examples/proxy/commands/Verify.java | 17 ++-- .../examples/proxy/metrics/CallLabels.java | 8 +- .../metrics/InstrumentedCallCredentials.java | 9 +- .../examples/proxy/metrics/MetricsImpl.java | 22 +++-- .../proxy/commands/ServeMetricsTest.java | 10 +-- .../proxy/metrics/CallLabelsTest.java | 4 +- 11 files changed, 135 insertions(+), 44 deletions(-) diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md index 2e7b908f9c0..48140f77a58 100644 --- a/bigtable/bigtable-proxy/README.md +++ b/bigtable/bigtable-proxy/README.md @@ -64,14 +64,27 @@ in a project your choosing. The metrics will be published under the namespace # Build the binary mvn package -# use the binary +# unpack the binary on the proxy host unzip target/bigtable-proxy-0.0.1-SNAPSHOT-bin.zip cd bigtable-proxy-0.0.1-SNAPSHOT + +# Verify that the proxy has required permissions using an existing table.
Please note that the table +# data will not be modified, however a test metric will be written. +./bigtable-verify.sh \ + --bigtable-project-id=$BIGTABLE_PROJECT_ID \ + --bigtable-instance-id=$BIGTABLE_INSTANCE_ID \ + --bigtable-table-id=$BIGTABLE_TABLE_ID \ + --metrics-project-id=$METRICS_PROJECT_ID + +# Then start the proxy on the specified port. The proxy can forward requests for multiple +# Bigtable projects/instances/tables. However it will export health metrics to a single project +# specified by `metrics-project-id`. ./bigtable-proxy.sh \ --listen-port=1234 \ --metrics-project-id=SOME_GCP_PROJECT -export BIGTABLE_EMULATOR_HOST=1234 +# Start your application, and redirect the bigtable client to connect to the local proxy. +export BIGTABLE_EMULATOR_HOST="localhost:1234" path/to/application/with/bigtable/client ``` diff --git a/bigtable/bigtable-proxy/pom.xml b/bigtable/bigtable-proxy/pom.xml index 345ef10de16..1eebfccb9a4 100644 --- a/bigtable/bigtable-proxy/pom.xml +++ b/bigtable/bigtable-proxy/pom.xml @@ -130,6 +130,11 @@ exporter-metrics ${exporter-metrics.version} + + + com.google.cloud + google-cloud-core + io.opentelemetry.contrib opentelemetry-gcp-resources diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java index 37a2ce7f692..b480f3777d8 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/Main.java @@ -18,8 +18,6 @@ import com.google.cloud.bigtable.examples.proxy.commands.Serve; import com.google.cloud.bigtable.examples.proxy.commands.Verify; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.slf4j.bridge.SLF4JBridgeHandler; import picocli.CommandLine; import picocli.CommandLine.Command; diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index ca6db6c83e5..e8c91769a19 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -17,22 +17,28 @@ package com.google.cloud.bigtable.examples.proxy.channelpool; import com.google.bigtable.v2.BigtableGrpc; -import com.google.bigtable.v2.BigtableGrpc.BigtableFutureStub; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.grpc.CallCredentials; import io.grpc.CallOptions; import io.grpc.ClientCall; +import io.grpc.ClientCall.Listener; import io.grpc.ConnectivityState; import io.grpc.Deadline; import io.grpc.ExperimentalApi; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; import io.grpc.MethodDescriptor; +import io.grpc.Status; import io.grpc.StatusRuntimeException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -50,7 +56,7 @@ public class DataChannel extends ManagedChannel { private final ManagedChannel inner; private final Metrics metrics; private final ResourceCollector resourceCollector; - private final BigtableFutureStub warmingStub; + private final CallCredentials callCredentials; private final ScheduledFuture antiIdleTask; 
private final AtomicBoolean closed = new AtomicBoolean(); @@ -65,6 +71,7 @@ public DataChannel( Metrics metrics) { this.resourceCollector = resourceCollector; + this.callCredentials = callCredentials; inner = ManagedChannelBuilder.forAddress(endpoint, port) .userAgent(userAgent) @@ -76,8 +83,6 @@ public DataChannel( this.metrics = metrics; try { - warmingStub = BigtableGrpc.newFutureStub(inner).withCallCredentials(callCredentials); - warm(); } catch (RuntimeException e) { try { @@ -107,10 +112,8 @@ private void warm() { return; } - BigtableFutureStub timedStub = warmingStub.withDeadline(Deadline.after(1, TimeUnit.MINUTES)); - List> futures = - requests.stream().map(timedStub::pingAndWarm).collect(Collectors.toList()); + requests.stream().map(this::sendPingAndWarm).collect(Collectors.toList()); int successCount = 0; int failures = 0; @@ -148,6 +151,61 @@ private void warm() { } } + private ListenableFuture sendPingAndWarm(PingAndWarmRequest request) { + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getPingAndWarmMethod(), + Optional.of("bigtableproxy"), + Optional.of(request.getName()), + Optional.of(request.getAppProfileId())); + Tracer tracer = new Tracer(metrics, callLabels); + + CallOptions callOptions = + CallOptions.DEFAULT + .withCallCredentials(callCredentials) + .withDeadline(Deadline.after(1, TimeUnit.MINUTES)); + callOptions = tracer.injectIntoCallOptions(callOptions); + + ClientCall call = + inner.newCall(BigtableGrpc.getPingAndWarmMethod(), callOptions); + + Metadata metadata = new Metadata(); + metadata.put( + CallLabels.REQUEST_PARAMS, + String.format( + "name=projects/%s/instances/%s", + URLEncoder.encode(request.getName(), StandardCharsets.UTF_8), + URLEncoder.encode(request.getAppProfileId(), StandardCharsets.UTF_8))); + + SettableFuture f = SettableFuture.create(); + call.start( + new Listener<>() { + @Override + public void onMessage(PingAndWarmResponse response) { + if (!f.set(response)) { + // TODO: set a metric + 
LOGGER.warn("PingAndWarm returned multiple responses"); + } + } + + @Override + public void onClose(Status status, Metadata trailers) { + tracer.onCallFinished(status); + + if (status.isOk()) { + f.setException(new IllegalStateException("PingAndWarm was missing a response")); + } else { + f.setException(status.asRuntimeException()); + } + } + }, + metadata); + call.sendMessage(request); + call.request(Integer.MAX_VALUE); + + return f; + } + @Override public ManagedChannel shutdown() { if (closed.compareAndSet(false, true)) { @@ -208,9 +266,13 @@ public void enterIdle() { @Override public ClientCall newCall( MethodDescriptor methodDescriptor, CallOptions callOptions) { - Optional.ofNullable(Tracer.extractTracerFromCallOptions(callOptions)) - .map(Tracer::getCallLabels) - .ifPresent(resourceCollector::collect); + Tracer tracer = + Optional.ofNullable(Tracer.extractTracerFromCallOptions(callOptions)) + .orElseThrow( + () -> + new IllegalStateException( + "DataChannel failed to extract Tracer from CallOptions")); + resourceCollector.collect(tracer.getCallLabels()); return inner.newCall(methodDescriptor, callOptions); } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index ba980e976ed..803c2de9bf4 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -106,6 +106,9 @@ void start() throws IOException { new InstrumentedCallCredentials(MoreCallCredentials.from(credentials)); if (metrics == null) { + // InstrumentedCallCredentials expect to only be called when a Tracer is available in the + // CallOptions. This is only true for DataChannel pingAndWarm and things invoked by + // ProxyHandler. 
MetricsImpl does not do this, so it must get undecorated credentials. metrics = new MetricsImpl(credentials, metricsProjectId); } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java index 58f35539741..d682cec4f7a 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -29,10 +29,9 @@ import com.google.bigtable.v2.RowFilter; import com.google.bigtable.v2.RowFilter.Chain; import com.google.bigtable.v2.RowSet; +import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; -import com.google.cloud.opentelemetry.resource.GcpResource; -import com.google.cloud.opentelemetry.resource.ResourceTranslator; import com.google.common.collect.ImmutableList; import com.google.common.net.PercentEscaper; import com.google.protobuf.ByteString; @@ -53,7 +52,6 @@ import io.opentelemetry.api.common.Attributes; import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; @@ -102,7 +100,6 @@ public class Verify implements Callable { showDefaultValue = Visibility.ALWAYS) Endpoint dataEndpoint = Endpoint.create("bigtable.googleapis.com", 443); - Credentials credentials = null; @Override @@ -141,7 +138,7 @@ private void checkBigtable(CallCredentials callCredentials, String tableName) { .setTableName( String.format( 
"projects/%s/instances/%s/tables/%s", - bigtableProjectId, bigtableTableId, bigtableTableId)) + bigtableProjectId, bigtableInstanceId, bigtableTableId)) .setRowsLimit(1) .setRows( RowSet.newBuilder().addRowKeys(ByteString.copyFromUtf8("some-nonexistent-row"))) @@ -194,7 +191,6 @@ void checkMetrics(Credentials creds) throws IOException { GCPResourceProvider resourceProvider = new GCPResourceProvider(); Resource resource = Resource.create(resourceProvider.getAttributes()); - GcpResource gcpResource = ResourceTranslator.mapResource(resource); MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration( @@ -208,10 +204,10 @@ void checkMetrics(Credentials creds) throws IOException { ImmutableList.of( ImmutableMetricData.createLongGauge( resource, - InstrumentationScopeInfo.create("bigtable-proxy"), - "bigtableproxy.presence", - "Number of proxy processes", - "{process}", + MetricsImpl.INSTRUMENTATION_SCOPE_INFO, + MetricsImpl.METRIC_PRESENCE_NAME, + MetricsImpl.METRIC_PRESENCE_DESC, + MetricsImpl.METRIC_PRESENCE_UNIT, ImmutableGaugeData.create( ImmutableList.of( ImmutableLongPointData.create( @@ -222,6 +218,7 @@ void checkMetrics(Credentials creds) throws IOException { CompletableResultCode result = exporter.export(metricData); result.join(1, TimeUnit.MINUTES); + System.out.println("Metrics resource: " + resource); if (result.isSuccess()) { System.out.println("Metrics write: OK"); } else { diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java index a9739fc200a..05d84582398 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java @@ -29,16 +29,16 @@ * A value class to encapsulate call identity. * *

    This call extracts relevant information from request headers and makes it accessible to - * metrics & the upstream client. The primary headers consulted are:

    + * metrics & the upstream client. The primary headers consulted are: * *
      - *
    • {@code x-goog-request-params} - contains the resource and app profile id
    • - *
    • {@code x-goog-api-client} - contains the client info of the downstream client
    • + *
    • {@code x-goog-request-params} - contains the resource and app profile id + *
    • {@code x-goog-api-client} - contains the client info of the downstream client *
    */ @AutoValue public abstract class CallLabels { - private static final Key REQUEST_PARAMS = + public static final Key REQUEST_PARAMS = Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); private static final Key API_CLIENT = Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java index fbc7f176a46..9f70124f600 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java @@ -24,6 +24,7 @@ import java.time.Duration; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; public class InstrumentedCallCredentials extends CallCredentials implements InternalMayRequireSpecificExecutor { @@ -40,7 +41,13 @@ public InstrumentedCallCredentials(CallCredentials inner) { @Override public void applyRequestMetadata( RequestInfo requestInfo, Executor appExecutor, MetadataApplier applier) { - Tracer tracer = Tracer.extractTracerFromCallOptions(requestInfo.getCallOptions()); + @Nullable Tracer tracer = Tracer.extractTracerFromCallOptions(requestInfo.getCallOptions()); + if (tracer == null) { + applier.fail( + Status.INTERNAL.withDescription( + "InstrumentedCallCredentials failed to extract tracer from CallOptions")); + return; + } final Stopwatch stopwatch = Stopwatch.createStarted(); inner.applyRequestMetadata( diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 
9c5f3f3795b..515e43d7ce7 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -28,6 +28,7 @@ import io.opentelemetry.api.metrics.LongUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.metrics.SdkMeterProvider; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; @@ -39,14 +40,21 @@ import java.util.function.Supplier; public class MetricsImpl implements Closeable, Metrics { + public static final InstrumentationScopeInfo INSTRUMENTATION_SCOPE_INFO = + InstrumentationScopeInfo.create("bigtable-proxy"); + public static final String METRIC_PREFIX = "bigtableproxy."; - static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("apiclient"); + static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("api_client"); static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + public static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence"; + public static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; + public static final String METRIC_PRESENCE_UNIT = "{process}"; + private final SdkMeterProvider meterProvider; private final DoubleHistogram gfeLatency; @@ -67,7 +75,11 @@ public class MetricsImpl implements Closeable, Metrics { public MetricsImpl(Credentials credentials, String projectId) throws IOException { meterProvider = createMeterProvider(credentials, projectId); - Meter meter 
= meterProvider.meterBuilder("bigtableproxy").build(); + Meter meter = + meterProvider + .meterBuilder(INSTRUMENTATION_SCOPE_INFO.getName()) + .setInstrumentationVersion(INSTRUMENTATION_SCOPE_INFO.getVersion()) + .build(); serverCallsStarted = meter @@ -151,9 +163,9 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); meter - .gaugeBuilder(METRIC_PREFIX + ".presence") - .setDescription("Number of proxy processes") - .setUnit("{process}") + .gaugeBuilder(METRIC_PRESENCE_NAME) + .setDescription(METRIC_PRESENCE_DESC) + .setUnit(METRIC_PRESENCE_UNIT) .ofLongs() .buildWithCallback(o -> o.record(1)); } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java index 40af628ed59..78bba61a0a6 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -98,19 +98,13 @@ public void setUp() throws Exception { fakeServiceChannel = grpcCleanup.register( - ManagedChannelBuilder.forAddress("localhost", server.getPort()) - .usePlaintext() - .build() - ); + ManagedChannelBuilder.forAddress("localhost", server.getPort()).usePlaintext().build()); serve = createAndStartCommand(fakeServiceChannel, fakeCredentials, mockMetrics); proxyChannel = grpcCleanup.register( - ManagedChannelBuilder.forAddress("localhost", serve.listenPort) - .usePlaintext() - .build() - ); + ManagedChannelBuilder.forAddress("localhost", serve.listenPort).usePlaintext().build()); } @After diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java 
b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java index 0de81769ca3..e2852ef88cb 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java @@ -54,7 +54,7 @@ public void testAllBasic() { CallLabelsSubject.assertThat(callLabels) .hasOtelAttributesThat() .containsAtLeast( - AttributeKey.stringKey("apiclient"), "some-client", + AttributeKey.stringKey("api_client"), "some-client", AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t", AttributeKey.stringKey("app_profile"), "a", AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); @@ -82,7 +82,7 @@ public void testEmpty() { CallLabelsSubject.assertThat(callLabels) .hasOtelAttributesThat() .containsAtLeast( - AttributeKey.stringKey("apiclient"), "", + AttributeKey.stringKey("api_client"), "", AttributeKey.stringKey("resource"), "", AttributeKey.stringKey("app_profile"), "", AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); From 08aee80f3324b215e23d7f2deef05119a856706f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:00:38 +0100 Subject: [PATCH 27/66] feat(tpu): add tpu queued resources startup script sample (#9604) * Added tpu_queued_resources_network sample * Fixed samples and tests * Fixed tests * Changed CODEOWNERS * Split samples, fixed startup script path * Fixed style * Added tag * Added header * Implemented tpu_queued_resources_startup_script sample, created test * Fixed test, deleted cleanup method * Fixed test, deleted cleanup method * Fixed test * Fixed naming * Changed zone * Fixed tests * Fixed tests * Increased timeout * Fixed code as requested 
in comments * Deleted settings * Fixed test --- ...CreateQueuedResourceWithStartupScript.java | 106 ++++++++++++++++++ .../java/tpu/DeleteForceQueuedResource.java | 16 +-- tpu/src/test/java/tpu/QueuedResourceIT.java | 41 ++++--- 3 files changed, 141 insertions(+), 22 deletions(-) create mode 100644 tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java diff --git a/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java b/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java new file mode 100644 index 00000000000..c070a627388 --- /dev/null +++ b/tpu/src/main/java/tpu/CreateQueuedResourceWithStartupScript.java @@ -0,0 +1,106 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_queued_resources_startup_script] +import com.google.cloud.tpu.v2alpha1.CreateQueuedResourceRequest; +import com.google.cloud.tpu.v2alpha1.Node; +import com.google.cloud.tpu.v2alpha1.QueuedResource; +import com.google.cloud.tpu.v2alpha1.TpuClient; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +public class CreateQueuedResourceWithStartupScript { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. 
+ // Project ID or project number of the Google Cloud project you want to create a node. + String projectId = "YOUR_PROJECT_ID"; + // The zone in which to create the TPU. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-a"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + // The accelerator type that specifies the version and size of the Cloud TPU you want to create. + // For more information about supported accelerator types for each TPU version, + // see https://cloud.google.com/tpu/docs/system-architecture-tpu-vm#versions. + String tpuType = "v2-8"; + // Software version that specifies the version of the TPU runtime to install. + // For more information see https://cloud.google.com/tpu/docs/runtimes + String tpuSoftwareVersion = "tpu-vm-tf-2.14.1"; + // The name for your Queued Resource. + String queuedResourceId = "QUEUED_RESOURCE_ID"; + + createQueuedResource(projectId, zone, queuedResourceId, nodeName, + tpuType, tpuSoftwareVersion); + } + + // Creates a Queued Resource with startup script. + public static QueuedResource createQueuedResource( + String projectId, String zone, String queuedResourceId, + String nodeName, String tpuType, String tpuSoftwareVersion) + throws IOException, ExecutionException, InterruptedException { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + String startupScriptContent = "#!/bin/bash\necho \"Hello from the startup script!\""; + // Add startup script to metadata + Map metadata = new HashMap<>(); + metadata.put("startup-script", startupScriptContent); + String queuedResourceForTpu = String.format("projects/%s/locations/%s/queuedResources/%s", + projectId, zone, queuedResourceId); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (TpuClient tpuClient = TpuClient.create()) { + Node node = + Node.newBuilder() + .setName(nodeName) + .setAcceleratorType(tpuType) + .setRuntimeVersion(tpuSoftwareVersion) + .setQueuedResource(queuedResourceForTpu) + .putAllMetadata(metadata) + .build(); + + QueuedResource queuedResource = + QueuedResource.newBuilder() + .setName(queuedResourceId) + .setTpu( + QueuedResource.Tpu.newBuilder() + .addNodeSpec( + QueuedResource.Tpu.NodeSpec.newBuilder() + .setParent(parent) + .setNode(node) + .setNodeId(nodeName) + .build()) + .build()) + .build(); + + CreateQueuedResourceRequest request = + CreateQueuedResourceRequest.newBuilder() + .setParent(parent) + .setQueuedResourceId(queuedResourceId) + .setQueuedResource(queuedResource) + .build(); + // You can wait until TPU Node is READY, + // and check its status using getTpuVm() from "tpu_vm_get" sample. + + return tpuClient.createQueuedResourceAsync(request).get(); + } + } +} +// [END tpu_queued_resources_startup_script] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java index 3de8567857d..f619889001c 100644 --- a/tpu/src/main/java/tpu/DeleteForceQueuedResource.java +++ b/tpu/src/main/java/tpu/DeleteForceQueuedResource.java @@ -18,7 +18,6 @@ //[START tpu_queued_resources_delete_force] import com.google.api.gax.retrying.RetrySettings; -import com.google.api.gax.rpc.UnknownException; import com.google.cloud.tpu.v2alpha1.DeleteQueuedResourceRequest; import com.google.cloud.tpu.v2alpha1.TpuClient; import com.google.cloud.tpu.v2alpha1.TpuSettings; @@ -27,12 +26,13 @@ import org.threeten.bp.Duration; public class DeleteForceQueuedResource { - public static void main(String[] args) { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { // TODO(developer): Replace these variables before running the sample. // Project ID or project number of the Google Cloud project. 
String projectId = "YOUR_PROJECT_ID"; // The zone in which the TPU was created. - String zone = "europe-west4-a"; + String zone = "us-central1-f"; // The name for your Queued Resource. String queuedResourceId = "QUEUED_RESOURCE_ID"; @@ -41,7 +41,8 @@ public static void main(String[] args) { // Deletes a Queued Resource asynchronously with --force flag. public static void deleteForceQueuedResource( - String projectId, String zone, String queuedResourceId) { + String projectId, String zone, String queuedResourceId) + throws ExecutionException, InterruptedException, IOException { String name = String.format("projects/%s/locations/%s/queuedResources/%s", projectId, zone, queuedResourceId); // With these settings the client library handles the Operation's polling mechanism @@ -65,13 +66,12 @@ public static void deleteForceQueuedResource( try (TpuClient tpuClient = TpuClient.create(clientSettings.build())) { DeleteQueuedResourceRequest request = DeleteQueuedResourceRequest.newBuilder().setName(name).setForce(true).build(); - + // Waiting for updates in the library. Until then, the operation will complete successfully, + // but the user will receive an error message with UnknownException and IllegalStateException. 
tpuClient.deleteQueuedResourceAsync(request).get(); - } catch (UnknownException | InterruptedException | ExecutionException | IOException e) { - System.out.println(e.getMessage()); + System.out.printf("Deleted Queued Resource: %s\n", name); } - System.out.printf("Deleted Queued Resource: %s\n", name); } } //[END tpu_queued_resources_delete_force] diff --git a/tpu/src/test/java/tpu/QueuedResourceIT.java b/tpu/src/test/java/tpu/QueuedResourceIT.java index ec7d9512b92..15dd2e768cc 100644 --- a/tpu/src/test/java/tpu/QueuedResourceIT.java +++ b/tpu/src/test/java/tpu/QueuedResourceIT.java @@ -16,7 +16,6 @@ package tpu; -import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -32,10 +31,8 @@ import com.google.cloud.tpu.v2alpha1.QueuedResource; import com.google.cloud.tpu.v2alpha1.TpuClient; import com.google.cloud.tpu.v2alpha1.TpuSettings; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.PrintStream; -import org.junit.jupiter.api.BeforeAll; +import java.util.concurrent.ExecutionException; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.runner.RunWith; @@ -52,13 +49,6 @@ public class QueuedResourceIT { private static final String TPU_SOFTWARE_VERSION = "tpu-vm-tf-2.14.1"; private static final String QUEUED_RESOURCE_NAME = "queued-resource"; private static final String NETWORK_NAME = "default"; - private static ByteArrayOutputStream bout; - - @BeforeAll - public static void setUp() { - bout = new ByteArrayOutputStream(); - System.setOut(new PrintStream(bout)); - } @Test public void testCreateQueuedResourceWithSpecifiedNetwork() throws Exception { @@ -105,7 +95,8 @@ public void testGetQueuedResource() throws IOException { } @Test - public void testDeleteForceQueuedResource() { + public void testDeleteForceQueuedResource() + throws IOException, InterruptedException, 
ExecutionException { try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { TpuClient mockTpuClient = mock(TpuClient.class); OperationFuture mockFuture = mock(OperationFuture.class); @@ -116,11 +107,33 @@ public void testDeleteForceQueuedResource() { .thenReturn(mockFuture); DeleteForceQueuedResource.deleteForceQueuedResource(PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME); - String output = bout.toString(); - assertThat(output).contains("Deleted Queued Resource:"); verify(mockTpuClient, times(1)) .deleteQueuedResourceAsync(any(DeleteQueuedResourceRequest.class)); } } + + @Test + public void testCreateQueuedResourceWithStartupScript() throws Exception { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + QueuedResource mockQueuedResource = mock(QueuedResource.class); + TpuClient mockTpuClient = mock(TpuClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient); + when(mockTpuClient.createQueuedResourceAsync(any(CreateQueuedResourceRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockQueuedResource); + + QueuedResource returnedQueuedResource = + CreateQueuedResourceWithStartupScript.createQueuedResource( + PROJECT_ID, ZONE, QUEUED_RESOURCE_NAME, NODE_NAME, + TPU_TYPE, TPU_SOFTWARE_VERSION); + + verify(mockTpuClient, times(1)) + .createQueuedResourceAsync(any(CreateQueuedResourceRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedQueuedResource, mockQueuedResource); + } + } } \ No newline at end of file From 535a64008860af1e8c44c01078ba91c33cf1c7e3 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 26 Nov 2024 12:41:20 -0500 Subject: [PATCH 28/66] fix: fix channel priming by adding missing halfClose() (#9737) --- .../cloud/bigtable/examples/proxy/channelpool/DataChannel.java | 1 + 1 file changed, 1 insertion(+) diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index e8c91769a19..d5add6069e5 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -201,6 +201,7 @@ public void onClose(Status status, Metadata trailers) { }, metadata); call.sendMessage(request); + call.halfClose(); call.request(Integer.MAX_VALUE); return f; From 772ba0c0cf29fdc1de5f078fd6fd65a2b16f248b Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 26 Nov 2024 13:16:03 -0500 Subject: [PATCH 29/66] fix PingAndWarm request params (#9738) --- .../cloud/bigtable/examples/proxy/channelpool/DataChannel.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index d5add6069e5..a43af3aaf0a 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -173,7 +173,7 @@ private ListenableFuture sendPingAndWarm(PingAndWarmRequest metadata.put( CallLabels.REQUEST_PARAMS, String.format( - "name=projects/%s/instances/%s", + "name=%s&app_profile_id=%s", URLEncoder.encode(request.getName(), StandardCharsets.UTF_8), URLEncoder.encode(request.getAppProfileId(), StandardCharsets.UTF_8))); From ebf78d9ce278db8c084979f82fa578e83695b2b2 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 26 Nov 2024 17:15:42 -0500 Subject: [PATCH 30/66] fix: bigtable-proxy verify command 
metric checks (#9741) --- .../cloud/bigtable/examples/proxy/commands/Verify.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java index d682cec4f7a..5e813b0d454 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -59,6 +59,7 @@ import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; import io.opentelemetry.sdk.resources.Resource; import java.io.IOException; +import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Iterator; @@ -185,9 +186,10 @@ private void checkBigtable(CallCredentials callCredentials, String tableName) { } } - void checkMetrics(Credentials creds) throws IOException { - Instant now = Instant.now().truncatedTo(ChronoUnit.MINUTES); + void checkMetrics(Credentials creds) { Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); + Instant start = end.minus(Duration.ofMinutes(1)); + GCPResourceProvider resourceProvider = new GCPResourceProvider(); Resource resource = Resource.create(resourceProvider.getAttributes()); @@ -211,7 +213,7 @@ void checkMetrics(Credentials creds) throws IOException { ImmutableGaugeData.create( ImmutableList.of( ImmutableLongPointData.create( - TimeUnit.MILLISECONDS.toNanos(now.toEpochMilli()), + TimeUnit.MILLISECONDS.toNanos(start.toEpochMilli()), TimeUnit.MILLISECONDS.toNanos(end.toEpochMilli()), Attributes.empty(), 1L))))); From b7781fd489cc9b530ce22cc1ddca80e907d9abd0 Mon Sep 17 00:00:00 2001 From: Kevin Kim Date: Wed, 27 Nov 2024 01:10:42 -0500 Subject: [PATCH 31/66] Update CODEOWNERS (#9734) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f21afe0f659..2a3f9fc7cee 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -54,7 +54,7 @@ # Cloud SDK Databases & Data Analytics teams # ---* Cloud Native DB -/bigtable @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-native-db-dpes +/bigtable @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-native-db-dpes @GoogleCloudPlatform/bigtable-eng /memorystore @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers /spanner @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/api-spanner-java # ---* Cloud Storage From 484fce81fd22a166d35dacee9e92efe78c0d867d Mon Sep 17 00:00:00 2001 From: Jacek Spalinski <69755075+jacspa96@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:08:30 +0100 Subject: [PATCH 32/66] feat(dataplex): add quickstart for dataplex (#9618) * feat(dataplex): create mvn project for dataplex quickstart * feat(dataplex): create quickstart guide for dataplex * feat(dataplex): add integration test for quickstart * feat(dataplex): add method comment for quickstart * feat(dataplex): add Search to quickstart * feat(dataplex): fix typos * feat(dataplex): sleep execution to allow resources to propagate * feat(dataplex): refactor test * feat(dataplex): Move variables declaration to pass lint * feat(dataplex): Enforce resource deletion before and after tests * feat(dataplex): Pass resources' IDs as method arguments --------- Co-authored-by: Jacek Spalinski --- dataplex/quickstart/pom.xml | 58 ++++ .../src/main/java/dataplex/Quickstart.java | 251 ++++++++++++++++++ .../src/test/java/dataplex/QuickstartIT.java | 131 +++++++++ 3 files changed, 440 insertions(+) create mode 100644 dataplex/quickstart/pom.xml 
create mode 100644 dataplex/quickstart/src/main/java/dataplex/Quickstart.java create mode 100644 dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java diff --git a/dataplex/quickstart/pom.xml b/dataplex/quickstart/pom.xml new file mode 100644 index 00000000000..07173434647 --- /dev/null +++ b/dataplex/quickstart/pom.xml @@ -0,0 +1,58 @@ + + + 4.0.0 + + dataplex + dataplex-quickstart + jar + Google Dataplex Quickstart + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + 11 + 11 + UTF-8 + + + + + + com.google.cloud + libraries-bom + 26.49.0 + pom + import + + + + + + + com.google.cloud + google-cloud-dataplex + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.4 + test + + + diff --git a/dataplex/quickstart/src/main/java/dataplex/Quickstart.java b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java new file mode 100644 index 00000000000..16afff3660d --- /dev/null +++ b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java @@ -0,0 +1,251 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package dataplex; + +// [START dataplex_quickstart] +import com.google.cloud.dataplex.v1.Aspect; +import com.google.cloud.dataplex.v1.AspectType; +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import com.google.cloud.dataplex.v1.Entry; +import com.google.cloud.dataplex.v1.EntryGroup; +import com.google.cloud.dataplex.v1.EntryGroupName; +import com.google.cloud.dataplex.v1.EntryName; +import com.google.cloud.dataplex.v1.EntrySource; +import com.google.cloud.dataplex.v1.EntryType; +import com.google.cloud.dataplex.v1.EntryView; +import com.google.cloud.dataplex.v1.GetEntryRequest; +import com.google.cloud.dataplex.v1.LocationName; +import com.google.cloud.dataplex.v1.SearchEntriesRequest; +import com.google.cloud.dataplex.v1.SearchEntriesResult; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +public class Quickstart { + + public static void main(String[] args) { + // TODO(developer): Replace these variables before running the sample. + String projectId = "MY_PROJECT_ID"; + // Available locations: https://cloud.google.com/dataplex/docs/locations + String location = "MY_LOCATION"; + // Variables below can be replaced with custom values or defaults can be kept + String aspectTypeId = "dataplex-quickstart-aspect-type"; + String entryTypeId = "dataplex-quickstart-entry-type"; + String entryGroupId = "dataplex-quickstart-entry-group"; + String entryId = "dataplex-quickstart-entry"; + + quickstart(projectId, location, aspectTypeId, entryTypeId, entryGroupId, entryId); + } + + // Method to demonstrate lifecycle of different Dataplex resources and their interactions. + // Method creates Aspect Type, Entry Type, Entry Group and Entry, retrieves Entry + // and cleans up created resources. 
+ public static void quickstart( + String projectId, + String location, + String aspectTypeId, + String entryTypeId, + String entryGroupId, + String entryId) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (CatalogServiceClient client = CatalogServiceClient.create()) { + // 0) Prepare variables used in following steps + LocationName globalLocationName = LocationName.of(projectId, "global"); + LocationName specificLocationName = LocationName.of(projectId, location); + + // 1) Create Aspect Type that will be attached to Entry Type + AspectType.MetadataTemplate aspectField = + AspectType.MetadataTemplate.newBuilder() + // The name must follow regex ^(([a-zA-Z]{1})([\\w\\-_]{0,62}))$ + // That means name must only contain alphanumeric character or dashes or underscores, + // start with an alphabet, and must be less than 63 characters. + .setName("example_field") + // Metadata Template is recursive structure, + // primitive types such as "string" or "integer" indicate leaf node, + // complex types such as "record" or "array" would require nested Metadata Template + .setType("string") + .setIndex(1) + .setAnnotations( + AspectType.MetadataTemplate.Annotations.newBuilder() + .setDescription("example field to be filled during entry creation") + .build()) + .setConstraints( + AspectType.MetadataTemplate.Constraints.newBuilder() + // Specifies if field will be required in Aspect Type. 
+ .setRequired(true) + .build()) + .build(); + AspectType aspectType = + AspectType.newBuilder() + .setDescription("aspect type for dataplex quickstart") + .setMetadataTemplate( + AspectType.MetadataTemplate.newBuilder() + .setName("example_template") + .setType("record") + // Aspect Type fields, that themselves are Metadata Templates + .addAllRecordFields(List.of(aspectField)) + .build()) + .build(); + AspectType createdAspectType = + client + .createAspectTypeAsync( + // Aspect Type is created in "global" location to highlight, that resources from + // "global" region can be attached to Entry created in specific location + globalLocationName, aspectType, aspectTypeId) + .get(); + System.out.println("Step 1: Created aspect type -> " + createdAspectType.getName()); + + // 2) Create Entry Type, of which type Entry will be created + EntryType entryType = + EntryType.newBuilder() + .setDescription("entry type for dataplex quickstart") + .addRequiredAspects( + EntryType.AspectInfo.newBuilder() + // Aspect Type created in step 1 + .setType( + String.format( + "projects/%s/locations/global/aspectTypes/%s", + projectId, aspectTypeId)) + .build()) + .build(); + EntryType createdEntryType = + client + // Entry Type is created in "global" location to highlight, that resources from + // "global" region can be attached to Entry created in specific location + .createEntryTypeAsync(globalLocationName, entryType, entryTypeId) + .get(); + System.out.println("Step 2: Created entry type -> " + createdEntryType.getName()); + + // 3) Create Entry Group in which Entry will be located + EntryGroup entryGroup = + EntryGroup.newBuilder().setDescription("entry group for dataplex quickstart").build(); + EntryGroup createdEntryGroup = + client + // Entry Group is created for specific location + .createEntryGroupAsync(specificLocationName, entryGroup, entryGroupId) + .get(); + System.out.println("Step 3: Created entry group -> " + createdEntryGroup.getName()); + + // 4) Create Entry + // Wait 
10 second to allow previously created resources to propagate + Thread.sleep(10000); + String aspectKey = String.format("%s.global.%s", projectId, aspectTypeId); + Entry entry = + Entry.newBuilder() + .setEntryType( + // Entry is an instance of Entry Type created in step 2 + String.format( + "projects/%s/locations/global/entryTypes/%s", projectId, entryTypeId)) + .setEntrySource( + EntrySource.newBuilder().setDescription("entry for dataplex quickstart").build()) + .putAllAspects( + Map.of( + // Attach Aspect that is an instance of Aspect Type created in step 1 + aspectKey, + Aspect.newBuilder() + .setAspectType( + String.format( + "projects/%s/locations/global/aspectTypes/%s", + projectId, aspectTypeId)) + .setData( + Struct.newBuilder() + .putFields( + "example_field", + Value.newBuilder() + .setStringValue("example value for the field") + .build()) + .build()) + .build())) + .build(); + Entry createdEntry = + client.createEntry( + // Entry is created in specific location, but it is still possible to link it with + // resources (Aspect Type and Entry Type) from "global" location + EntryGroupName.of(projectId, location, entryGroupId), entry, entryId); + System.out.println("Step 4: Created entry -> " + createdEntry.getName()); + + // 5) Retrieve created Entry + GetEntryRequest getEntryRequest = + GetEntryRequest.newBuilder() + .setName(EntryName.of(projectId, location, entryGroupId, entryId).toString()) + .setView(EntryView.FULL) + .build(); + Entry retrievedEntry = client.getEntry(getEntryRequest); + System.out.println("Step 5: Retrieved entry -> " + retrievedEntry.getName()); + retrievedEntry + .getAspectsMap() + .values() + .forEach( + retrievedAspect -> { + System.out.println("Retrieved aspect for entry:"); + System.out.println(" * aspect type -> " + retrievedAspect.getAspectType()); + System.out.println( + " * aspect field value -> " + + retrievedAspect + .getData() + .getFieldsMap() + .get("example_field") + .getStringValue()); + }); + + // 6) Use Search 
capabilities to find Entry + // Wait 30 second to allow resources to propagate to Search + System.out.println("Step 6: Waiting for resources to propagate to Search..."); + Thread.sleep(30000); + SearchEntriesRequest searchEntriesRequest = + SearchEntriesRequest.newBuilder() + .setName(globalLocationName.toString()) + .setQuery("name:dataplex-quickstart-entry") + .build(); + CatalogServiceClient.SearchEntriesPagedResponse searchEntriesResponse = + client.searchEntries(searchEntriesRequest); + List entriesFromSearch = + searchEntriesResponse.getPage().getResponse().getResultsList().stream() + .map(SearchEntriesResult::getDataplexEntry) + .collect(Collectors.toList()); + System.out.println("Entries found in Search:"); + // Please note in output that Entry Group and Entry Type are also represented as Entries + entriesFromSearch.forEach( + entryFromSearch -> System.out.println(" * " + entryFromSearch.getName())); + + // 7) Clean created resources + client + .deleteEntryGroupAsync( + String.format( + "projects/%s/locations/%s/entryGroups/%s", projectId, location, entryGroupId)) + .get(); + client + .deleteEntryTypeAsync( + String.format("projects/%s/locations/global/entryTypes/%s", projectId, entryTypeId)) + .get(); + client + .deleteAspectTypeAsync( + String.format("projects/%s/locations/global/aspectTypes/%s", projectId, aspectTypeId)) + .get(); + System.out.println("Step 7: Successfully cleaned up resources"); + + } catch (IOException | InterruptedException | ExecutionException e) { + System.err.println("Error during quickstart execution: " + e); + } + } +} +// [END dataplex_quickstart] diff --git a/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java new file mode 100644 index 00000000000..9c2835cdd8f --- /dev/null +++ b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java @@ -0,0 +1,131 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package dataplex; + +import static com.google.common.truth.Truth.assertThat; +import static junit.framework.TestCase.assertNotNull; + +import com.google.cloud.dataplex.v1.CatalogServiceClient; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.util.List; +import java.util.UUID; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class QuickstartIT { + private static final String ID = UUID.randomUUID().toString().substring(0, 8); + private static final String LOCATION = "us-central1"; + private static final String PROJECT_ID = requireProjectIdEnvVar(); + private static ByteArrayOutputStream bout; + private static PrintStream originalPrintStream; + private static final String ASPECT_TYPE_ID = "quickstart-aspect-type-" + ID; + private static final String ENTRY_TYPE_ID = "quickstart-entry-type-" + ID; + private static final String ENTRY_GROUP_ID = "quickstart-entry-group-" + ID; + private static final String ENTRY_ID = "quickstart-entry-" + ID; + + private static String requireProjectIdEnvVar() { + String value = System.getenv("GOOGLE_CLOUD_PROJECT"); + assertNotNull( + "Environment variable GOOGLE_CLOUD_PROJECT is required to perform these tests.", value); + return value; + } + + private static void forceCleanResources() throws IOException { + try (CatalogServiceClient client = CatalogServiceClient.create()) { + try { + client + .deleteEntryGroupAsync( + 
String.format( + "projects/%s/locations/%s/entryGroups/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + try { + client + .deleteEntryTypeAsync( + String.format( + "projects/%s/locations/global/entryTypes/%s", PROJECT_ID, ENTRY_TYPE_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + try { + client + .deleteAspectTypeAsync( + String.format( + "projects/%s/locations/global/aspectTypes/%s", PROJECT_ID, ASPECT_TYPE_ID)) + .get(); + } catch (Exception e) { + // Pass, no resource to delete + } + } + } + + @BeforeClass + public static void setUp() { + requireProjectIdEnvVar(); + // Re-direct print stream to capture logging + bout = new ByteArrayOutputStream(); + originalPrintStream = System.out; + System.setOut(new PrintStream(bout)); + } + + @Test + public void testQuickstart() { + List expectedLogs = + List.of( + String.format( + "Step 1: Created aspect type -> projects/%s/locations/global/aspectTypes/%s", + PROJECT_ID, ASPECT_TYPE_ID), + String.format( + "Step 2: Created entry type -> projects/%s/locations/global/entryTypes/%s", + PROJECT_ID, ENTRY_TYPE_ID), + String.format( + "Step 3: Created entry group -> projects/%s/locations/%s/entryGroups/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID), + String.format( + "Step 4: Created entry -> projects/%s/locations/%s/entryGroups/%s/entries/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + String.format( + "Step 5: Retrieved entry -> projects/%s/locations/%s/entryGroups/%s/entries/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + // Step 6 - result from Search + String.format( + "projects/%s/locations/%s/entryGroups/%s/entries/%s", + PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + "Step 7: Successfully cleaned up resources"); + + Quickstart.quickstart( + PROJECT_ID, LOCATION, ASPECT_TYPE_ID, ENTRY_TYPE_ID, ENTRY_GROUP_ID, ENTRY_ID); + String output = bout.toString(); + + expectedLogs.forEach(expectedLog -> 
assertThat(output).contains(expectedLog)); + } + + @AfterClass + public static void tearDown() throws IOException { + forceCleanResources(); + // Restore print statements + System.setOut(originalPrintStream); + bout.reset(); + } +} From f82460a364cdeaf6c7e805334553f874045115da Mon Sep 17 00:00:00 2001 From: lovenishs04 Date: Thu, 28 Nov 2024 04:40:45 +0000 Subject: [PATCH 33/66] feat(securitycenter): Add Resource SCC Management API Org SHA Custom Module code samples (Update, Get Eff, List Eff, List Desc, Simulate) (#9683) * sample codes for security health analytics modules * fixed lint error * fixing testcase * fixing testcase * fixing testcase * addressed comments * addressed comments * addressed comments --- ...teSecurityHealthAnalyticsCustomModule.java | 17 +-- ...teSecurityHealthAnalyticsCustomModule.java | 13 +- ...veSecurityHealthAnalyticsCustomModule.java | 61 +++++++++ ...etSecurityHealthAnalyticsCustomModule.java | 12 +- ...tSecurityHealthAnalyticsCustomModules.java | 55 ++++++++ ...eSecurityHealthAnalyticsCustomModules.java | 55 ++++++++ ...tSecurityHealthAnalyticsCustomModules.java | 12 +- ...teSecurityHealthAnalyticsCustomModule.java | 118 ++++++++++++++++++ ...teSecurityHealthAnalyticsCustomModule.java | 76 +++++++++++ ...curityHealthAnalyticsCustomModuleTest.java | 114 +++++++++++++---- .../snippets/src/test/java/vtwo/IamIT.java | 2 +- 11 files changed, 488 insertions(+), 47 deletions(-) create mode 100644 security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java create mode 100644 security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java create mode 100644 security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java create mode 100644 security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java create mode 100644 
security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java diff --git a/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java index 403670363c4..11c5ae45fa4 100644 --- a/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java +++ b/security-command-center/snippets/src/main/java/management/api/CreateSecurityHealthAnalyticsCustomModule.java @@ -31,16 +31,16 @@ public class CreateSecurityHealthAnalyticsCustomModule { public static void main(String[] args) throws IOException { // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/create - // replace "project_id" with a real project ID - String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; String customModuleDisplayName = "custom_module_display_name"; - createSecurityHealthAnalyticsCustomModule(parent, customModuleDisplayName); + createSecurityHealthAnalyticsCustomModule(projectId, customModuleDisplayName); } public static SecurityHealthAnalyticsCustomModule createSecurityHealthAnalyticsCustomModule( - String parent, String customModuleDisplayName) throws IOException { + String projectId, String customModuleDisplayName) throws IOException { // Initialize client that will be used to send requests. 
This client only needs // to be created @@ -48,9 +48,12 @@ public static SecurityHealthAnalyticsCustomModule createSecurityHealthAnalyticsC try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { String name = - String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, "custom_module"); + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, "custom_module"); - // define the CEL expression here, change it according to the your requirements + // define the CEL expression here and this will scans for keys that have not been rotated in + // the last 30 days, change it according to the your requirements Expr expr = Expr.newBuilder() .setExpression( @@ -87,7 +90,7 @@ public static SecurityHealthAnalyticsCustomModule createSecurityHealthAnalyticsC CreateSecurityHealthAnalyticsCustomModuleRequest request = CreateSecurityHealthAnalyticsCustomModuleRequest.newBuilder() - .setParent(parent) + .setParent(String.format("projects/%s/locations/global", projectId)) .setSecurityHealthAnalyticsCustomModule(securityHealthAnalyticsCustomModule) .build(); diff --git a/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java index 071ea1bfb58..61d51cc3262 100644 --- a/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java +++ b/security-command-center/snippets/src/main/java/management/api/DeleteSecurityHealthAnalyticsCustomModule.java @@ -25,23 +25,26 @@ public class DeleteSecurityHealthAnalyticsCustomModule { public static void main(String[] args) throws IOException { // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/delete - // replace "project_id" with a real project ID 
- String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; String customModuleId = "custom_module_id"; - deleteSecurityHealthAnalyticsCustomModule(parent, customModuleId); + deleteSecurityHealthAnalyticsCustomModule(projectId, customModuleId); } public static boolean deleteSecurityHealthAnalyticsCustomModule( - String parent, String customModuleId) throws IOException { + String projectId, String customModuleId) throws IOException { // Initialize client that will be used to send requests. This client only needs // to be created // once, and can be reused for multiple requests. try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + String name = - String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, customModuleId); + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); DeleteSecurityHealthAnalyticsCustomModuleRequest request = DeleteSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); diff --git a/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..8fde10c20f8 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/GetEffectiveSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,61 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package management.api; + +// [START securitycenter_get_effective_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.EffectiveSecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.GetEffectiveSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import java.io.IOException; + +public class GetEffectiveSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.effectiveSecurityHealthAnalyticsCustomModules/get + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + getEffectiveSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static EffectiveSecurityHealthAnalyticsCustomModule + getEffectiveSecurityHealthAnalyticsCustomModule(String projectId, String customModuleId) + throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/effectiveSecurityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + GetEffectiveSecurityHealthAnalyticsCustomModuleRequest request = + GetEffectiveSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); + + EffectiveSecurityHealthAnalyticsCustomModule response = + client.getEffectiveSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_get_effective_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java index 0270d9e1307..8e149656aea 100644 --- a/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java +++ b/security-command-center/snippets/src/main/java/management/api/GetSecurityHealthAnalyticsCustomModule.java @@ -26,16 +26,16 @@ public class GetSecurityHealthAnalyticsCustomModule { public static void main(String[] args) throws IOException { // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/get - // replace "project_id" with a real project ID - String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; String customModuleId = "custom_module_id"; - getSecurityHealthAnalyticsCustomModule(parent, customModuleId); + getSecurityHealthAnalyticsCustomModule(projectId, customModuleId); } public static SecurityHealthAnalyticsCustomModule getSecurityHealthAnalyticsCustomModule( - String parent, String 
customModuleId) throws IOException { + String projectId, String customModuleId) throws IOException { // Initialize client that will be used to send requests. This client only needs // to be created @@ -43,7 +43,9 @@ public static SecurityHealthAnalyticsCustomModule getSecurityHealthAnalyticsCust try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { String name = - String.format("%s/securityHealthAnalyticsCustomModules/%s", parent, customModuleId); + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); GetSecurityHealthAnalyticsCustomModuleRequest request = GetSecurityHealthAnalyticsCustomModuleRequest.newBuilder().setName(name).build(); diff --git a/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..ae39a37deb5 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListDescendantSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_descendant_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListDescendantSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListDescendantSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/listDescendant + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + listDescendantSecurityHealthAnalyticsCustomModules(projectId); + } + + public static ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse + listDescendantSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListDescendantSecurityHealthAnalyticsCustomModulesRequest request = + ListDescendantSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); + + ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listDescendantSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_descendant_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java new file mode 100644 index 00000000000..8e4da2917d9 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/ListEffectiveSecurityHealthAnalyticsCustomModules.java @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_list_effective_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.ListEffectiveSecurityHealthAnalyticsCustomModulesRequest; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse; +import java.io.IOException; + +public class ListEffectiveSecurityHealthAnalyticsCustomModules { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.effectiveSecurityHealthAnalyticsCustomModules/list + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + listEffectiveSecurityHealthAnalyticsCustomModules(projectId); + } + + public static ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse + listEffectiveSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + ListEffectiveSecurityHealthAnalyticsCustomModulesRequest request = + ListEffectiveSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); + + ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse response = + client.listEffectiveSecurityHealthAnalyticsCustomModules(request); + + return response; + } + } +} +// [END securitycenter_list_effective_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java index cae1e227665..f3d994f9c60 100644 --- a/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java +++ b/security-command-center/snippets/src/main/java/management/api/ListSecurityHealthAnalyticsCustomModules.java @@ -26,21 +26,23 @@ public class ListSecurityHealthAnalyticsCustomModules { public static void main(String[] args) throws IOException { // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/list - // replace "project_id" with a real project ID - String parent = String.format("projects/%s/locations/%s", "project_id", "global"); + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; - listSecurityHealthAnalyticsCustomModules(parent); + listSecurityHealthAnalyticsCustomModules(projectId); } public static ListSecurityHealthAnalyticsCustomModulesPagedResponse - listSecurityHealthAnalyticsCustomModules(String parent) throws IOException { + listSecurityHealthAnalyticsCustomModules(String projectId) throws IOException { // Initialize client 
that will be used to send requests. This client only needs // to be created // once, and can be reused for multiple requests. try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { ListSecurityHealthAnalyticsCustomModulesRequest request = - ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder().setParent(parent).build(); + ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .build(); ListSecurityHealthAnalyticsCustomModulesPagedResponse response = client.listSecurityHealthAnalyticsCustomModules(request); diff --git a/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..c9b2a79c42d --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/SimulateSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,118 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_simulate_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.CustomConfig; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.ResourceSelector; +import com.google.cloud.securitycentermanagement.v1.CustomConfig.Severity; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleRequest.SimulatedResource; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleResponse; +import com.google.iam.v1.Binding; +import com.google.iam.v1.Policy; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import com.google.type.Expr; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class SimulateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/simulate + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + simulateSecurityHealthAnalyticsCustomModule(projectId); + } + + public static SimulateSecurityHealthAnalyticsCustomModuleResponse + simulateSecurityHealthAnalyticsCustomModule(String projectId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. 
+ try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + // define the CEL expression here and this will scans for keys that have not been rotated in + // the last 30 days, change it according to the your requirements + Expr expr = + Expr.newBuilder() + .setExpression( + "has(resource.rotationPeriod) && (resource.rotationPeriod > " + + "duration('2592000s'))") + .build(); + + // define the resource selector + ResourceSelector resourceSelector = + ResourceSelector.newBuilder() + .addResourceTypes("cloudkms.googleapis.com/CryptoKey") + .build(); + + // define the custom module configuration, update the severity, description, + // recommendation below + CustomConfig customConfig = + CustomConfig.newBuilder() + .setPredicate(expr) + .setResourceSelector(resourceSelector) + .setSeverity(Severity.MEDIUM) + .setDescription("add your description here") + .setRecommendation("add your recommendation here") + .build(); + + // define the simulated resource data + Map resourceData = new HashMap<>(); + resourceData.put("resourceId", Value.newBuilder().setStringValue("test-resource-id").build()); + resourceData.put("name", Value.newBuilder().setStringValue("test-resource-name").build()); + Struct resourceDataStruct = Struct.newBuilder().putAllFields(resourceData).build(); + + // define the policy + Policy policy = + Policy.newBuilder() + .addBindings( + Binding.newBuilder() + .setRole("roles/owner") + .addMembers("user:test-user@gmail.com") + .build()) + .build(); + + // replace with the correct resource type + SimulatedResource simulatedResource = + SimulatedResource.newBuilder() + .setResourceType("cloudkms.googleapis.com/CryptoKey") + .setResourceData(resourceDataStruct) + .setIamPolicyData(policy) + .build(); + + SimulateSecurityHealthAnalyticsCustomModuleRequest request = + SimulateSecurityHealthAnalyticsCustomModuleRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", projectId)) + .setCustomConfig(customConfig) 
+ .setResource(simulatedResource) + .build(); + + SimulateSecurityHealthAnalyticsCustomModuleResponse response = + client.simulateSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_simulate_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java b/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java new file mode 100644 index 00000000000..1a92299f896 --- /dev/null +++ b/security-command-center/snippets/src/main/java/management/api/UpdateSecurityHealthAnalyticsCustomModule.java @@ -0,0 +1,76 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package management.api; + +// [START securitycenter_update_security_health_analytics_custom_module] +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.cloud.securitycentermanagement.v1.UpdateSecurityHealthAnalyticsCustomModuleRequest; +import com.google.protobuf.FieldMask; +import java.io.IOException; + +public class UpdateSecurityHealthAnalyticsCustomModule { + + public static void main(String[] args) throws IOException { + // https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules/patch + // TODO: Developer should replace project_id with a real project ID before running this code + String projectId = "project_id"; + + String customModuleId = "custom_module_id"; + + updateSecurityHealthAnalyticsCustomModule(projectId, customModuleId); + } + + public static SecurityHealthAnalyticsCustomModule updateSecurityHealthAnalyticsCustomModule( + String projectId, String customModuleId) throws IOException { + + // Initialize client that will be used to send requests. This client only needs + // to be created + // once, and can be reused for multiple requests. + try (SecurityCenterManagementClient client = SecurityCenterManagementClient.create()) { + + String name = + String.format( + "projects/%s/locations/global/securityHealthAnalyticsCustomModules/%s", + projectId, customModuleId); + + // Define the security health analytics custom module configuration, update the + // EnablementState accordingly. 
+ SecurityHealthAnalyticsCustomModule securityHealthAnalyticsCustomModule = + SecurityHealthAnalyticsCustomModule.newBuilder() + .setName(name) + .setEnablementState(EnablementState.DISABLED) + .build(); + + // Set the field mask to specify which properties should be updated. + FieldMask fieldMask = FieldMask.newBuilder().addPaths("enablement_state").build(); + + UpdateSecurityHealthAnalyticsCustomModuleRequest request = + UpdateSecurityHealthAnalyticsCustomModuleRequest.newBuilder() + .setSecurityHealthAnalyticsCustomModule(securityHealthAnalyticsCustomModule) + .setUpdateMask(fieldMask) + .build(); + + SecurityHealthAnalyticsCustomModule response = + client.updateSecurityHealthAnalyticsCustomModule(request); + + return response; + } + } +} +// [END securitycenter_update_security_health_analytics_custom_module] diff --git a/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java index d35cc085be6..51040b2b012 100644 --- a/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java +++ b/security-command-center/snippets/src/test/java/management/api/SecurityHealthAnalyticsCustomModuleTest.java @@ -21,15 +21,21 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import com.google.cloud.securitycentermanagement.v1.EffectiveSecurityHealthAnalyticsCustomModule; import com.google.cloud.securitycentermanagement.v1.ListSecurityHealthAnalyticsCustomModulesRequest; import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient; +import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse; +import 
com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse; import com.google.cloud.securitycentermanagement.v1.SecurityCenterManagementClient.ListSecurityHealthAnalyticsCustomModulesPagedResponse; import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule; +import com.google.cloud.securitycentermanagement.v1.SecurityHealthAnalyticsCustomModule.EnablementState; +import com.google.cloud.securitycentermanagement.v1.SimulateSecurityHealthAnalyticsCustomModuleResponse; import com.google.cloud.testing.junit4.MultipleAttemptsRule; import com.google.common.base.Strings; import java.io.IOException; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.StreamSupport; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -39,9 +45,8 @@ @RunWith(JUnit4.class) public class SecurityHealthAnalyticsCustomModuleTest { - - private static final String parent = - String.format("organizations/%s/locations/%s", System.getenv("SCC_PROJECT_ORG_ID"), "global"); + // TODO(Developer): Replace the below variable + private static final String PROJECT_ID = System.getenv("SCC_PROJECT_ID"); private static final String CUSTOM_MODULE_DISPLAY_NAME = "java_sample_custom_module_test"; private static final int MAX_ATTEMPT_COUNT = 3; private static final int INITIAL_BACKOFF_MILLIS = 120000; // 2 minutes @@ -60,7 +65,7 @@ public static void requireEnvVar(String envVarName) { @BeforeClass public static void setUp() throws InterruptedException { requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - requireEnvVar("SCC_PROJECT_ORG_ID"); + requireEnvVar("SCC_PROJECT_ID"); } @AfterClass @@ -71,21 +76,19 @@ public static void cleanUp() throws IOException { // cleanupExistingCustomModules clean up all the existing custom module private static void cleanupExistingCustomModules() throws IOException { - try (SecurityCenterManagementClient client 
= SecurityCenterManagementClient.create()) { - ListSecurityHealthAnalyticsCustomModulesRequest request = - ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder().setParent(parent).build(); - + ListSecurityHealthAnalyticsCustomModulesRequest.newBuilder() + .setParent(String.format("projects/%s/locations/global", PROJECT_ID)) + .build(); ListSecurityHealthAnalyticsCustomModulesPagedResponse response = client.listSecurityHealthAnalyticsCustomModules(request); - // Iterate over the response and delete custom module one by one which start with // java_sample_custom_module for (SecurityHealthAnalyticsCustomModule module : response.iterateAll()) { if (module.getDisplayName().startsWith("java_sample_custom_module")) { String customModuleId = extractCustomModuleId(module.getName()); - deleteCustomModule(parent, customModuleId); + deleteCustomModule(PROJECT_ID, customModuleId); } } } @@ -105,21 +108,22 @@ private static String extractCustomModuleId(String customModuleFullName) { // createCustomModule method is for creating the custom module private static SecurityHealthAnalyticsCustomModule createCustomModule( - String parent, String customModuleDisplayName) throws IOException { - if (!Strings.isNullOrEmpty(parent) && !Strings.isNullOrEmpty(customModuleDisplayName)) { + String projectId, String customModuleDisplayName) throws IOException { + if (!Strings.isNullOrEmpty(projectId) && !Strings.isNullOrEmpty(customModuleDisplayName)) { SecurityHealthAnalyticsCustomModule response = CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule( - parent, customModuleDisplayName); + projectId, customModuleDisplayName); return response; } return null; } // deleteCustomModule method is for deleting the custom module - private static void deleteCustomModule(String parent, String customModuleId) throws IOException { - if (!Strings.isNullOrEmpty(parent) && !Strings.isNullOrEmpty(customModuleId)) { + private static void deleteCustomModule(String projectId, 
String customModuleId) + throws IOException { + if (!Strings.isNullOrEmpty(projectId) && !Strings.isNullOrEmpty(customModuleId)) { DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule( - parent, customModuleId); + projectId, customModuleId); } } @@ -127,7 +131,7 @@ private static void deleteCustomModule(String parent, String customModuleId) thr public void testCreateSecurityHealthAnalyticsCustomModule() throws IOException { SecurityHealthAnalyticsCustomModule response = CreateSecurityHealthAnalyticsCustomModule.createSecurityHealthAnalyticsCustomModule( - parent, CUSTOM_MODULE_DISPLAY_NAME); + PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); assertNotNull(response); assertThat(response.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME); @@ -136,30 +140,92 @@ public void testCreateSecurityHealthAnalyticsCustomModule() throws IOException { @Test public void testDeleteSecurityHealthAnalyticsCustomModule() throws IOException { SecurityHealthAnalyticsCustomModule response = - createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); String customModuleId = extractCustomModuleId(response.getName()); assertTrue( DeleteSecurityHealthAnalyticsCustomModule.deleteSecurityHealthAnalyticsCustomModule( - parent, customModuleId)); + PROJECT_ID, customModuleId)); } @Test public void testListSecurityHealthAnalyticsCustomModules() throws IOException { - createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); - assertNotNull( - ListSecurityHealthAnalyticsCustomModules.listSecurityHealthAnalyticsCustomModules(parent)); + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); + ListSecurityHealthAnalyticsCustomModulesPagedResponse response = + ListSecurityHealthAnalyticsCustomModules.listSecurityHealthAnalyticsCustomModules( + PROJECT_ID); + assertTrue( + StreamSupport.stream(response.iterateAll().spliterator(), false) + .anyMatch(module -> 
CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName()))); } @Test public void testGetSecurityHealthAnalyticsCustomModule() throws IOException { SecurityHealthAnalyticsCustomModule createCustomModuleResponse = - createCustomModule(parent, CUSTOM_MODULE_DISPLAY_NAME); + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName()); SecurityHealthAnalyticsCustomModule getCustomModuleResponse = GetSecurityHealthAnalyticsCustomModule.getSecurityHealthAnalyticsCustomModule( - parent, customModuleId); + PROJECT_ID, customModuleId); assertThat(getCustomModuleResponse.getDisplayName()).isEqualTo(CUSTOM_MODULE_DISPLAY_NAME); assertThat(extractCustomModuleId(getCustomModuleResponse.getName())).isEqualTo(customModuleId); } + + @Test + public void testUpdateSecurityHealthAnalyticsCustomModule() throws IOException { + SecurityHealthAnalyticsCustomModule createCustomModuleResponse = + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); + String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName()); + SecurityHealthAnalyticsCustomModule response = + UpdateSecurityHealthAnalyticsCustomModule.updateSecurityHealthAnalyticsCustomModule( + PROJECT_ID, customModuleId); + assertNotNull(response); + assertThat(response.getEnablementState().equals(EnablementState.DISABLED)); + } + + @Test + public void testGetEffectiveSecurityHealthAnalyticsCustomModule() throws IOException { + SecurityHealthAnalyticsCustomModule createCustomModuleResponse = + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); + String customModuleId = extractCustomModuleId(createCustomModuleResponse.getName()); + EffectiveSecurityHealthAnalyticsCustomModule getEffectiveCustomModuleResponse = + GetEffectiveSecurityHealthAnalyticsCustomModule + .getEffectiveSecurityHealthAnalyticsCustomModule(PROJECT_ID, customModuleId); + + assertThat(getEffectiveCustomModuleResponse.getDisplayName()) + 
.isEqualTo(CUSTOM_MODULE_DISPLAY_NAME); + assertThat(extractCustomModuleId(getEffectiveCustomModuleResponse.getName())) + .isEqualTo(customModuleId); + } + + @Test + public void testListEffectiveSecurityHealthAnalyticsCustomModules() throws IOException { + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); + ListEffectiveSecurityHealthAnalyticsCustomModulesPagedResponse response = + ListEffectiveSecurityHealthAnalyticsCustomModules + .listEffectiveSecurityHealthAnalyticsCustomModules(PROJECT_ID); + assertTrue( + StreamSupport.stream(response.iterateAll().spliterator(), false) + .anyMatch(module -> CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName()))); + } + + @Test + public void testListDescendantSecurityHealthAnalyticsCustomModules() throws IOException { + createCustomModule(PROJECT_ID, CUSTOM_MODULE_DISPLAY_NAME); + ListDescendantSecurityHealthAnalyticsCustomModulesPagedResponse response = + ListDescendantSecurityHealthAnalyticsCustomModules + .listDescendantSecurityHealthAnalyticsCustomModules(PROJECT_ID); + assertTrue( + StreamSupport.stream(response.iterateAll().spliterator(), false) + .anyMatch(module -> CUSTOM_MODULE_DISPLAY_NAME.equals(module.getDisplayName()))); + } + + @Test + public void testSimulateSecurityHealthAnalyticsCustomModule() throws IOException { + SimulateSecurityHealthAnalyticsCustomModuleResponse response = + SimulateSecurityHealthAnalyticsCustomModule.simulateSecurityHealthAnalyticsCustomModule( + PROJECT_ID); + assertNotNull(response); + assertThat(response.getResult().equals("no_violation")); + } } diff --git a/security-command-center/snippets/src/test/java/vtwo/IamIT.java b/security-command-center/snippets/src/test/java/vtwo/IamIT.java index a116cf6ee0f..8e95bf38a11 100644 --- a/security-command-center/snippets/src/test/java/vtwo/IamIT.java +++ b/security-command-center/snippets/src/test/java/vtwo/IamIT.java @@ -37,7 +37,7 @@ public class IamIT { private static final String ORGANIZATION_ID = 
System.getenv("SCC_PROJECT_ORG_ID"); - private static final String USER_EMAIL = "someuser@domain.com"; + private static final String USER_EMAIL = "example@domain.com"; private static final String USER_PERMISSION = "securitycenter.findings.update"; private static final String USER_ROLE = "roles/securitycenter.findingsEditor"; private static Source SOURCE; From 0b5e7fda780389279194a2d3e1cd83d408c23e28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Sat, 30 Nov 2024 18:05:39 +0100 Subject: [PATCH 34/66] feat(compute): add compute consistency group remove disk (#9682) * Implemented compute_consistency_group_create and compute_consistency_group_delete samples, created test * Implemented compute_consistency_group_add_disk sample * Implemented compute_consistency_group_add_disk sample, created test * Implemented compute_consistency_group_remove_disk sample, created test * Fixed code, created test with mocked client * Increased timeout * Fixed test * Fixed code * Increased timeout * Created test for Hyperdisk with mocked client * Fixed tests * Fixed comments * Fixed naming --- .../AddDiskToConsistencyGroup.java | 98 ++++++++++++ .../CreateConsistencyGroup.java | 75 +++++++++ .../DeleteConsistencyGroup.java | 59 +++++++ .../RemoveDiskFromConsistencyGroup.java | 100 ++++++++++++ .../compute/disks/ConsistencyGroupIT.java | 148 ++++++++++++++++++ .../java/compute/disks/CreateHyperdiskIT.java | 79 ++++++++++ .../test/java/compute/disks/HyperdiskIT.java | 132 ++++++++++++++++ .../test/java/compute/disks/HyperdisksIT.java | 141 ----------------- 8 files changed, 691 insertions(+), 141 deletions(-) create mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java create mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java create 
mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java create mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java create mode 100644 compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java create mode 100644 compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java create mode 100644 compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java delete mode 100644 compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java new file mode 100644 index 00000000000..7c4650fad09 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/AddDiskToConsistencyGroup.java @@ -0,0 +1,98 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_add_disk] +import com.google.cloud.compute.v1.AddResourcePoliciesDiskRequest; +import com.google.cloud.compute.v1.AddResourcePoliciesRegionDiskRequest; +import com.google.cloud.compute.v1.DisksAddResourcePoliciesRequest; +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.RegionDisksAddResourcePoliciesRequest; +import com.google.cloud.compute.v1.RegionDisksClient; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class AddDiskToConsistencyGroup { + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project that contains the disk. + String project = "YOUR_PROJECT_ID"; + // Zone or region of the disk. + String location = "us-central1"; + // Name of the disk. + String diskName = "DISK_NAME"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + addDiskToConsistencyGroup( + project, location, diskName, consistencyGroupName, consistencyGroupLocation); + } + + // Adds a disk to a consistency group. 
+ public static Operation.Status addDiskToConsistencyGroup( + String project, String location, String diskName, + String consistencyGroupName, String consistencyGroupLocation) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + String consistencyGroupUrl = String.format( + "https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + Operation response; + if (Character.isDigit(location.charAt(location.length() - 1))) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (RegionDisksClient disksClient = RegionDisksClient.create()) { + AddResourcePoliciesRegionDiskRequest request = + AddResourcePoliciesRegionDiskRequest.newBuilder() + .setDisk(diskName) + .setRegion(location) + .setProject(project) + .setRegionDisksAddResourcePoliciesRequestResource( + RegionDisksAddResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.addResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } else { + try (DisksClient disksClient = DisksClient.create()) { + AddResourcePoliciesDiskRequest request = + AddResourcePoliciesDiskRequest.newBuilder() + .setDisk(diskName) + .setZone(location) + .setProject(project) + .setDisksAddResourcePoliciesRequestResource( + DisksAddResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.addResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } + if (response.hasError()) { + throw new Error("Error adding disk to consistency group! 
" + response.getError()); + } + return response.getStatus(); + } +} +// [END compute_consistency_group_add_disk] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java new file mode 100644 index 00000000000..df6c324d8d5 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/CreateConsistencyGroup.java @@ -0,0 +1,75 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_create] +import com.google.cloud.compute.v1.InsertResourcePolicyRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ResourcePoliciesClient; +import com.google.cloud.compute.v1.ResourcePolicy; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class CreateConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. 
+ String project = "YOUR_PROJECT_ID"; + // Name of the region in which you want to create the consistency group. + String region = "us-central1"; + // Name of the consistency group you want to create. + String consistencyGroupName = "YOUR_CONSISTENCY_GROUP_NAME"; + + createConsistencyGroup(project, region, consistencyGroupName); + } + + // Creates a new consistency group resource policy in the specified project and region. + public static Operation.Status createConsistencyGroup( + String project, String region, String consistencyGroupName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ResourcePoliciesClient regionResourcePoliciesClient = ResourcePoliciesClient.create()) { + ResourcePolicy resourcePolicy = + ResourcePolicy.newBuilder() + .setName(consistencyGroupName) + .setRegion(region) + .setDiskConsistencyGroupPolicy( + ResourcePolicy.newBuilder().getDiskConsistencyGroupPolicy()) + .build(); + + InsertResourcePolicyRequest request = InsertResourcePolicyRequest.newBuilder() + .setProject(project) + .setRegion(region) + .setResourcePolicyResource(resourcePolicy) + .build(); + + Operation response = + regionResourcePoliciesClient.insertAsync(request).get(1, TimeUnit.MINUTES); + + if (response.hasError()) { + throw new Error("Error creating consistency group! 
" + response.getError()); + } + return response.getStatus(); + } + } +} +// [END compute_consistency_group_create] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java new file mode 100644 index 00000000000..89ab6f756e0 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/DeleteConsistencyGroup.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_delete] +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.ResourcePoliciesClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class DeleteConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Region in which your consistency group is located. + String region = "us-central1"; + // Name of the consistency group you want to delete. 
+ String consistencyGroupName = "YOUR_CONSISTENCY_GROUP_NAME"; + + deleteConsistencyGroup(project, region, consistencyGroupName); + } + + // Deletes a consistency group resource policy in the specified project and region. + public static Operation.Status deleteConsistencyGroup( + String project, String region, String consistencyGroupName) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (ResourcePoliciesClient resourcePoliciesClient = ResourcePoliciesClient.create()) { + Operation response = resourcePoliciesClient + .deleteAsync(project, region, consistencyGroupName).get(1, TimeUnit.MINUTES); + + if (response.hasError()) { + throw new Error("Error deleting consistency group! " + response.getError()); + } + return response.getStatus(); + } + } +} +// [END compute_consistency_group_delete] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java new file mode 100644 index 00000000000..b791125b0dd --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/RemoveDiskFromConsistencyGroup.java @@ -0,0 +1,100 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_remove_disk] +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.DisksRemoveResourcePoliciesRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.RegionDisksClient; +import com.google.cloud.compute.v1.RegionDisksRemoveResourcePoliciesRequest; +import com.google.cloud.compute.v1.RemoveResourcePoliciesDiskRequest; +import com.google.cloud.compute.v1.RemoveResourcePoliciesRegionDiskRequest; +import java.io.IOException; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class RemoveDiskFromConsistencyGroup { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project that contains the disk. + String project = "YOUR_PROJECT_ID"; + // Zone or region of the disk. + String location = "us-central1"; + // Name of the disk. + String diskName = "DISK_NAME"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + removeDiskFromConsistencyGroup( + project, location, diskName, consistencyGroupName, consistencyGroupLocation); + } + + // Removes a disk from a consistency group. 
+ public static Operation.Status removeDiskFromConsistencyGroup( + String project, String location, String diskName, + String consistencyGroupName, String consistencyGroupLocation) + throws IOException, ExecutionException, InterruptedException, TimeoutException { + String consistencyGroupUrl = String.format( + "https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + Operation response; + if (Character.isDigit(location.charAt(location.length() - 1))) { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (RegionDisksClient disksClient = RegionDisksClient.create()) { + RemoveResourcePoliciesRegionDiskRequest request = + RemoveResourcePoliciesRegionDiskRequest.newBuilder() + .setDisk(diskName) + .setRegion(location) + .setProject(project) + .setRegionDisksRemoveResourcePoliciesRequestResource( + RegionDisksRemoveResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + + response = disksClient.removeResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } else { + try (DisksClient disksClient = DisksClient.create()) { + RemoveResourcePoliciesDiskRequest request = + RemoveResourcePoliciesDiskRequest.newBuilder() + .setDisk(diskName) + .setZone(location) + .setProject(project) + .setDisksRemoveResourcePoliciesRequestResource( + DisksRemoveResourcePoliciesRequest.newBuilder() + .addAllResourcePolicies(Arrays.asList(consistencyGroupUrl)) + .build()) + .build(); + response = disksClient.removeResourcePoliciesAsync(request).get(1, TimeUnit.MINUTES); + } + } + if (response.hasError()) { + throw new Error("Error removing disk from consistency group! 
" + response.getError()); + } + return response.getStatus(); + } +} +// [END compute_consistency_group_remove_disk] \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java new file mode 100644 index 00000000000..6e901460ce0 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java @@ -0,0 +1,148 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package compute.disks; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.AddResourcePoliciesRegionDiskRequest; +import com.google.cloud.compute.v1.InsertResourcePolicyRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.RegionDisksClient; +import com.google.cloud.compute.v1.RemoveResourcePoliciesRegionDiskRequest; +import com.google.cloud.compute.v1.ResourcePoliciesClient; +import compute.disks.consistencygroup.AddDiskToConsistencyGroup; +import compute.disks.consistencygroup.CreateConsistencyGroup; +import compute.disks.consistencygroup.DeleteConsistencyGroup; +import compute.disks.consistencygroup.RemoveDiskFromConsistencyGroup; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.MockedStatic; + +@RunWith(JUnit4.class) +@Timeout(value = 2, unit = TimeUnit.MINUTES) +public class ConsistencyGroupIT { + private static final String PROJECT_ID = "project-id"; + private static final String REGION = "asia-east1"; + private static final String CONSISTENCY_GROUP_NAME = "consistency-group"; + private static final String DISK_NAME = "disk-for-consistency"; + + @Test + public void testCreateConsistencyGroupResourcePolicy() throws Exception { + try (MockedStatic mockedResourcePoliciesClient = + mockStatic(ResourcePoliciesClient.class)) { + Operation operation = mock(Operation.class); + ResourcePoliciesClient mockClient = mock(ResourcePoliciesClient.class); + OperationFuture mockFuture = 
mock(OperationFuture.class); + + mockedResourcePoliciesClient.when(ResourcePoliciesClient::create).thenReturn(mockClient); + when(mockClient.insertAsync(any(InsertResourcePolicyRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + + Operation.Status status = CreateConsistencyGroup.createConsistencyGroup( + PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME); + + verify(mockClient, times(1)).insertAsync(any(InsertResourcePolicyRequest.class)); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(Operation.Status.DONE, status); + } + } + + @Test + public void testAddRegionalDiskToConsistencyGroup() throws Exception { + try (MockedStatic mockedRegionDisksClient = + mockStatic(RegionDisksClient.class)) { + Operation operation = mock(Operation.class); + RegionDisksClient mockClient = mock(RegionDisksClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient); + when(mockClient.addResourcePoliciesAsync(any(AddResourcePoliciesRegionDiskRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + + Operation.Status status = AddDiskToConsistencyGroup.addDiskToConsistencyGroup( + PROJECT_ID, REGION, DISK_NAME, CONSISTENCY_GROUP_NAME, REGION); + + verify(mockClient, times(1)) + .addResourcePoliciesAsync(any(AddResourcePoliciesRegionDiskRequest.class)); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(Operation.Status.DONE, status); + } + } + + @Test + public void testRemoveDiskFromConsistencyGroup() throws Exception { + try (MockedStatic mockedRegionDisksClient = + mockStatic(RegionDisksClient.class)) { + Operation operation = mock(Operation.class); + 
RegionDisksClient mockClient = mock(RegionDisksClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient); + when(mockClient.removeResourcePoliciesAsync( + any(RemoveResourcePoliciesRegionDiskRequest.class))).thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + + Operation.Status status = RemoveDiskFromConsistencyGroup.removeDiskFromConsistencyGroup( + PROJECT_ID, REGION, DISK_NAME, CONSISTENCY_GROUP_NAME, REGION); + + verify(mockClient, times(1)) + .removeResourcePoliciesAsync(any(RemoveResourcePoliciesRegionDiskRequest.class)); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(Operation.Status.DONE, status); + } + } + + @Test + public void testDeleteConsistencyGroup() throws Exception { + try (MockedStatic mockedResourcePoliciesClient = + mockStatic(ResourcePoliciesClient.class)) { + Operation operation = mock(Operation.class); + ResourcePoliciesClient mockClient = mock(ResourcePoliciesClient.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedResourcePoliciesClient.when(ResourcePoliciesClient::create).thenReturn(mockClient); + when(mockClient.deleteAsync(PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME)) + .thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + + Operation.Status status = DeleteConsistencyGroup.deleteConsistencyGroup( + PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME); + + verify(mockClient, times(1)) + .deleteAsync(PROJECT_ID, REGION, CONSISTENCY_GROUP_NAME); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(Operation.Status.DONE, status); + } + } +} diff --git a/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java 
b/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java new file mode 100644 index 00000000000..b54af43baf4 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/disks/CreateHyperdiskIT.java @@ -0,0 +1,79 @@ +/* +* Copyright 2024 Google LLC +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package compute.disks; + +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.compute.v1.Disk; +import java.io.IOException; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.junit.Assert; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class CreateHyperdiskIT { + private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); + private static final String ZONE = "us-west1-a"; + private static final String HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); + + // Check if the required environment variables are set. 
+ public static void requireEnvVar(String envVarName) { + assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) + .that(System.getenv(envVarName)).isNotEmpty(); + } + + @BeforeAll + public static void setUp() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); + requireEnvVar("GOOGLE_CLOUD_PROJECT"); + } + + @AfterAll + public static void cleanup() + throws IOException, InterruptedException, ExecutionException, TimeoutException { + // Delete disk created for testing. + DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_NAME); + } + + @Test + public void testCreateHyperdisk() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); + + Disk hyperdisk = CreateHyperdisk + .createHyperdisk(PROJECT_ID, ZONE, HYPERDISK_NAME, diskType, + 10, 3000, 140); + + Assert.assertNotNull(hyperdisk); + Assert.assertEquals(HYPERDISK_NAME, hyperdisk.getName()); + Assert.assertEquals(3000, hyperdisk.getProvisionedIops()); + Assert.assertEquals(140, hyperdisk.getProvisionedThroughput()); + Assert.assertEquals(10, hyperdisk.getSizeGb()); + Assert.assertTrue(hyperdisk.getType().contains("hyperdisk-balanced")); + Assert.assertTrue(hyperdisk.getZone().contains(ZONE)); + } +} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java new file mode 100644 index 00000000000..c30de733a53 --- /dev/null +++ b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java @@ -0,0 +1,132 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.api.gax.longrunning.OperationFuture; +import com.google.cloud.compute.v1.Disk; +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.InsertDiskRequest; +import com.google.cloud.compute.v1.InsertStoragePoolRequest; +import com.google.cloud.compute.v1.Operation; +import com.google.cloud.compute.v1.StoragePool; +import com.google.cloud.compute.v1.StoragePoolsClient; +import compute.disks.storagepool.CreateDiskInStoragePool; +import compute.disks.storagepool.CreateHyperdiskStoragePool; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +@RunWith(JUnit4.class) +@Timeout(value = 5, unit = TimeUnit.MINUTES) +public class HyperdiskIT { + private static final String PROJECT_ID = "project-id"; + private static final String ZONE = "asia-east1-a"; + private static final String HYPERDISK_IN_POOL_NAME = "hyperdisk"; + private static final String STORAGE_POOL_NAME = "storage-pool"; + private static final String 
PERFORMANCE_PROVISIONING_TYPE = "advanced"; + private static final String CAPACITY_PROVISIONING_TYPE = "advanced"; + + @Test + public void testCreateHyperdiskStoragePool() throws Exception { + String poolType = String.format( + "projects/%s/zones/%s/storagePoolTypes/%s", PROJECT_ID, ZONE, "hyperdisk-balanced"); + StoragePool storagePool = StoragePool.newBuilder() + .setZone(ZONE) + .setName(STORAGE_POOL_NAME) + .setStoragePoolType(poolType) + .setCapacityProvisioningType(CAPACITY_PROVISIONING_TYPE) + .setPoolProvisionedCapacityGb(10240) + .setPoolProvisionedIops(10000) + .setPoolProvisionedThroughput(1024) + .setPerformanceProvisioningType(PERFORMANCE_PROVISIONING_TYPE) + .build(); + try (MockedStatic mockedStoragePoolsClient = + mockStatic(StoragePoolsClient.class)) { + StoragePoolsClient mockClient = mock(StoragePoolsClient.class); + OperationFuture mockFuture = + mock(OperationFuture.class, Mockito.RETURNS_DEEP_STUBS); + Operation operation = mock(Operation.class, Mockito.RETURNS_DEEP_STUBS); + + mockedStoragePoolsClient.when(StoragePoolsClient::create).thenReturn(mockClient); + when(mockClient.insertAsync(any(InsertStoragePoolRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + when(mockClient.get(PROJECT_ID, ZONE, STORAGE_POOL_NAME)).thenReturn(storagePool); + + + StoragePool expectedStoragePool = CreateHyperdiskStoragePool + .createHyperdiskStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME, poolType, + CAPACITY_PROVISIONING_TYPE, 10240, 10000, 1024, + PERFORMANCE_PROVISIONING_TYPE); + + verify(mockClient, times(1)).insertAsync(any(InsertStoragePoolRequest.class)); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(storagePool, expectedStoragePool); + } + } + + @Test + public void testCreateDiskInStoragePool() throws Exception { + String diskType = 
String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); + Disk expectedHyperdisk = Disk.newBuilder() + .setZone(ZONE) + .setName(HYPERDISK_IN_POOL_NAME) + .setType(diskType) + .setSizeGb(10L) + .setProvisionedIops(3000L) + .setProvisionedThroughput(140L) + .build(); + String storagePoolLink = String.format("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/storagePools/%s", + PROJECT_ID, ZONE, STORAGE_POOL_NAME); + + try (MockedStatic mockedDisksClient = mockStatic(DisksClient.class)) { + DisksClient mockClient = mock(DisksClient.class); + OperationFuture mockFuture = + mock(OperationFuture.class, Mockito.RETURNS_DEEP_STUBS); + Operation operation = mock(Operation.class, Mockito.RETURNS_DEEP_STUBS); + + mockedDisksClient.when(DisksClient::create).thenReturn(mockClient); + when(mockClient.insertAsync(any(InsertDiskRequest.class))).thenReturn(mockFuture); + when(mockFuture.get(anyLong(), any(TimeUnit.class))).thenReturn(operation); + when(operation.getStatus()).thenReturn(Operation.Status.DONE); + when(mockClient.get(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME)).thenReturn(expectedHyperdisk); + + + Disk returnedDisk = CreateDiskInStoragePool + .createDiskInStoragePool(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME, storagePoolLink, + diskType, 10, 3000, 140); + + verify(mockClient, times(1)).insertAsync(any(InsertDiskRequest.class)); + verify(mockFuture, times(1)).get(anyLong(), any(TimeUnit.class)); + assertEquals(expectedHyperdisk, returnedDisk); + } + } +} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java deleted file mode 100644 index 4b61e5bf16d..00000000000 --- a/compute/cloud-client/src/test/java/compute/disks/HyperdisksIT.java +++ /dev/null @@ -1,141 +0,0 @@ -/* -* Copyright 2024 Google LLC -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package compute.disks; - -import static com.google.common.truth.Truth.assertWithMessage; - -import com.google.cloud.compute.v1.Disk; -import com.google.cloud.compute.v1.StoragePool; -import compute.Util; -import compute.disks.storagepool.CreateDiskInStoragePool; -import compute.disks.storagepool.CreateHyperdiskStoragePool; -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import org.junit.Assert; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.MethodOrderer; -import org.junit.jupiter.api.Order; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestMethodOrder; -import org.junit.jupiter.api.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -@Timeout(value = 6, unit = TimeUnit.MINUTES) -@TestMethodOrder(MethodOrderer.OrderAnnotation.class) -public class HyperdisksIT { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String ZONE = "us-west1-a"; - private static final String HYPERDISK_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - private static final String HYPERDISK_IN_POOL_NAME = "test-hyperdisk-enc-" + UUID.randomUUID(); - private static final String STORAGE_POOL_NAME = "test-storage-pool-enc-" + UUID.randomUUID(); - private static final String PERFORMANCE_PROVISIONING_TYPE = "advanced"; - private static final String 
CAPACITY_PROVISIONING_TYPE = "advanced"; - - // Check if the required environment variables are set. - public static void requireEnvVar(String envVarName) { - assertWithMessage(String.format("Missing environment variable '%s' ", envVarName)) - .that(System.getenv(envVarName)).isNotEmpty(); - } - - @BeforeAll - public static void setUp() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - requireEnvVar("GOOGLE_CLOUD_PROJECT"); - } - - @AfterAll - public static void cleanup() - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // Delete all disks created for testing. - DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_NAME); - DeleteDisk.deleteDisk(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME); - - Util.deleteStoragePool(PROJECT_ID, ZONE, STORAGE_POOL_NAME); - } - - @Test - @Order(1) - public void testCreateHyperdisk() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); - - Disk hyperdisk = CreateHyperdisk - .createHyperdisk(PROJECT_ID, ZONE, HYPERDISK_NAME, diskType, - 10, 3000, 140); - - Assert.assertNotNull(hyperdisk); - Assert.assertEquals(HYPERDISK_NAME, hyperdisk.getName()); - Assert.assertEquals(3000, hyperdisk.getProvisionedIops()); - Assert.assertEquals(140, hyperdisk.getProvisionedThroughput()); - Assert.assertEquals(10, hyperdisk.getSizeGb()); - Assert.assertTrue(hyperdisk.getType().contains("hyperdisk-balanced")); - Assert.assertTrue(hyperdisk.getZone().contains(ZONE)); - } - - @Test - @Order(1) - public void testCreateHyperdiskStoragePool() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String poolType = String.format("projects/%s/zones/%s/storagePoolTypes/hyperdisk-balanced", - PROJECT_ID, ZONE); - StoragePool storagePool = CreateHyperdiskStoragePool - .createHyperdiskStoragePool(PROJECT_ID, 
ZONE, STORAGE_POOL_NAME, poolType, - CAPACITY_PROVISIONING_TYPE, 10240, 10000, 1024, - PERFORMANCE_PROVISIONING_TYPE); - - Assert.assertNotNull(storagePool); - Assert.assertEquals(STORAGE_POOL_NAME, storagePool.getName()); - Assert.assertEquals(10000, storagePool.getPoolProvisionedIops()); - Assert.assertEquals(1024, storagePool.getPoolProvisionedThroughput()); - Assert.assertEquals(10240, storagePool.getPoolProvisionedCapacityGb()); - Assert.assertTrue(storagePool.getStoragePoolType().contains("hyperdisk-balanced")); - Assert.assertTrue(storagePool.getCapacityProvisioningType() - .equalsIgnoreCase(CAPACITY_PROVISIONING_TYPE)); - Assert.assertTrue(storagePool.getPerformanceProvisioningType() - .equalsIgnoreCase(PERFORMANCE_PROVISIONING_TYPE)); - Assert.assertTrue(storagePool.getZone().contains(ZONE)); - } - - @Test - @Order(2) - public void testCreateDiskInStoragePool() - throws IOException, ExecutionException, InterruptedException, TimeoutException { - String diskType = String.format("zones/%s/diskTypes/hyperdisk-balanced", ZONE); - String storagePoolLink = String - .format("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/storagePools/%s", - PROJECT_ID, ZONE, STORAGE_POOL_NAME); - Disk disk = CreateDiskInStoragePool - .createDiskInStoragePool(PROJECT_ID, ZONE, HYPERDISK_IN_POOL_NAME, storagePoolLink, - diskType, 10, 3000, 140); - - Assert.assertNotNull(disk); - Assert.assertEquals(HYPERDISK_IN_POOL_NAME, disk.getName()); - Assert.assertTrue(disk.getStoragePool().contains(STORAGE_POOL_NAME)); - Assert.assertEquals(3000, disk.getProvisionedIops()); - Assert.assertEquals(140, disk.getProvisionedThroughput()); - Assert.assertEquals(10, disk.getSizeGb()); - Assert.assertTrue(disk.getType().contains("hyperdisk-balanced")); - Assert.assertTrue(disk.getZone().contains(ZONE)); - } -} \ No newline at end of file From fc31f6b0dd85df117418ad942ab3bec5d02ec373 Mon Sep 17 00:00:00 2001 From: surbhigarg92 Date: Mon, 2 Dec 2024 12:04:06 +0530 Subject: [PATCH 35/66] 
fix:spanner-opentelemetry documentation (#9744) --- spanner/opentelemetry/pom.xml | 4 ++-- .../main/java/com/example/spanner/OpenTelemetryUsage.java | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/spanner/opentelemetry/pom.xml b/spanner/opentelemetry/pom.xml index 46bd1872dbc..534c9ae3ca7 100644 --- a/spanner/opentelemetry/pom.xml +++ b/spanner/opentelemetry/pom.xml @@ -25,6 +25,7 @@ 1.2.0 + @@ -45,7 +46,6 @@ - com.google.cloud google-cloud-spanner @@ -66,7 +66,7 @@ io.opentelemetry opentelemetry-exporter-otlp - + diff --git a/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java b/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java index 43a9eb500d6..4648142d7bc 100644 --- a/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java +++ b/spanner/opentelemetry/src/main/java/com/example/spanner/OpenTelemetryUsage.java @@ -77,14 +77,16 @@ public static void main(String[] args) { .build(); Spanner spanner = options.getService(); - // [END spanner_opentelemetry_usage] DatabaseClient dbClient = spanner .getDatabaseClient(DatabaseId.of(projectId, instanceId, databaseId)); captureGfeMetric(dbClient); captureQueryStatsMetric(openTelemetry, dbClient); - sdkMeterProvider.forceFlush(); - sdkTracerProvider.forceFlush(); + + // Close the providers to free up the resources and export the data. 
*/ + sdkMeterProvider.close(); + sdkTracerProvider.close(); + // [END spanner_opentelemetry_usage] } From ca5e81475fc25fac001d17179b73ba55d4ad9c3d Mon Sep 17 00:00:00 2001 From: Jacek Spalinski <69755075+jacspa96@users.noreply.github.com> Date: Mon, 2 Dec 2024 15:59:58 +0100 Subject: [PATCH 36/66] feat(dataplex): Fix dataplex quickstart flaky execution (#9792) * feat(dataplex): Increase wait time for resource creation * feat(dataplex): Adjust expected log in test * feat(dataplex): Fix typo in comments --------- Co-authored-by: Jacek Spalinski --- dataplex/quickstart/src/main/java/dataplex/Quickstart.java | 6 +++--- .../quickstart/src/test/java/dataplex/QuickstartIT.java | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/dataplex/quickstart/src/main/java/dataplex/Quickstart.java b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java index 16afff3660d..177d8c9a3d3 100644 --- a/dataplex/quickstart/src/main/java/dataplex/Quickstart.java +++ b/dataplex/quickstart/src/main/java/dataplex/Quickstart.java @@ -146,8 +146,8 @@ public static void quickstart( System.out.println("Step 3: Created entry group -> " + createdEntryGroup.getName()); // 4) Create Entry - // Wait 10 second to allow previously created resources to propagate - Thread.sleep(10000); + // Wait 30 seconds to allow previously created resources to propagate + Thread.sleep(30000); String aspectKey = String.format("%s.global.%s", projectId, aspectTypeId); Entry entry = Entry.newBuilder() @@ -208,7 +208,7 @@ public static void quickstart( }); // 6) Use Search capabilities to find Entry - // Wait 30 second to allow resources to propagate to Search + // Wait 30 seconds to allow resources to propagate to Search System.out.println("Step 6: Waiting for resources to propagate to Search..."); Thread.sleep(30000); SearchEntriesRequest searchEntriesRequest = diff --git a/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java 
index 9c2835cdd8f..62330c98eca 100644 --- a/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java +++ b/dataplex/quickstart/src/test/java/dataplex/QuickstartIT.java @@ -109,9 +109,7 @@ public void testQuickstart() { "Step 5: Retrieved entry -> projects/%s/locations/%s/entryGroups/%s/entries/%s", PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), // Step 6 - result from Search - String.format( - "projects/%s/locations/%s/entryGroups/%s/entries/%s", - PROJECT_ID, LOCATION, ENTRY_GROUP_ID, ENTRY_ID), + "Entries found in Search:", "Step 7: Successfully cleaned up resources"); Quickstart.quickstart( From 02ad120721858d97c5ec11aab847aaf060e96841 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 2 Dec 2024 10:06:59 -0500 Subject: [PATCH 37/66] chore: move CallLabels to core (#9746) --- .../examples/proxy/channelpool/DataChannel.java | 2 +- .../examples/proxy/channelpool/ResourceCollector.java | 2 +- .../bigtable/examples/proxy/commands/Verify.java | 2 -- .../examples/proxy/{metrics => core}/CallLabels.java | 3 ++- .../bigtable/examples/proxy/core/ProxyHandler.java | 1 - .../bigtable/examples/proxy/metrics/Metrics.java | 1 + .../bigtable/examples/proxy/metrics/MetricsImpl.java | 11 ++++++----- .../bigtable/examples/proxy/metrics/NoopMetrics.java | 1 + .../cloud/bigtable/examples/proxy/metrics/Tracer.java | 1 + .../examples/proxy/commands/ServeMetricsTest.java | 2 +- .../proxy/{metrics => core}/CallLabelsTest.java | 2 +- 11 files changed, 15 insertions(+), 13 deletions(-) rename bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/{metrics => core}/CallLabels.java (97%) rename bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/{metrics => core}/CallLabelsTest.java (99%) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index 
a43af3aaf0a..746bedde821 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -19,7 +19,7 @@ import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; -import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import com.google.common.util.concurrent.ListenableFuture; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java index 29beecf89e7..c5b529ab06e 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java @@ -17,7 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.channelpool; import com.google.bigtable.v2.PingAndWarmRequest; -import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableList; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java index 5e813b0d454..aee401c63ed 100644 --- 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -58,7 +58,6 @@ import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; import io.opentelemetry.sdk.resources.Resource; -import java.io.IOException; import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -190,7 +189,6 @@ void checkMetrics(Credentials creds) { Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); Instant start = end.minus(Duration.ofMinutes(1)); - GCPResourceProvider resourceProvider = new GCPResourceProvider(); Resource resource = Resource.create(resourceProvider.getAttributes()); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java similarity index 97% rename from bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java rename to bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java index 05d84582398..5eed055e35c 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -package com.google.cloud.bigtable.examples.proxy.metrics; +package com.google.cloud.bigtable.examples.proxy.core; import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; import io.grpc.Metadata; import io.grpc.Metadata.Key; import io.grpc.MethodDescriptor; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java index 421c664325e..dfdbdd24ba2 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/ProxyHandler.java @@ -16,7 +16,6 @@ package com.google.cloud.bigtable.examples.proxy.core; -import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import io.grpc.CallCredentials; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index be39e8f6a6e..484708a2701 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import io.grpc.Status; import java.time.Duration; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 
515e43d7ce7..c9304bcc5fc 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; import com.google.auth.Credentials; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; import io.grpc.Status; @@ -45,11 +46,11 @@ public class MetricsImpl implements Closeable, Metrics { public static final String METRIC_PREFIX = "bigtableproxy."; - static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("api_client"); - static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); - static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); - static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); - static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + public static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("api_client"); + public static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); + public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + public static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + public static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); public static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence"; public static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index c8a4fe3934f..88a85b6388a 100644 --- 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import io.grpc.Status; import java.time.Duration; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index fe3a9c421fd..eadb9847eef 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -16,6 +16,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.common.base.Stopwatch; import io.grpc.CallOptions; import io.grpc.CallOptions.Key; diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java index 78bba61a0a6..66318cd57cf 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -29,7 +29,7 @@ import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase; import com.google.bigtable.v2.CheckAndMutateRowRequest; import com.google.bigtable.v2.CheckAndMutateRowResponse; -import com.google.cloud.bigtable.examples.proxy.metrics.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import 
com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.common.collect.Lists; import io.grpc.CallOptions; diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java similarity index 99% rename from bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java rename to bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java index e2852ef88cb..23b071fe96c 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/CallLabelsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package com.google.cloud.bigtable.examples.proxy.metrics; +package com.google.cloud.bigtable.examples.proxy.core; import static com.google.common.truth.Truth.assertAbout; import static com.google.common.truth.Truth.assertThat; From c291101f04aacb87928eb97a7c110f44130fc3b3 Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Mon, 2 Dec 2024 11:23:23 -0600 Subject: [PATCH 38/66] chore(speech): delete sample speech_adaptation_beta (#9740) --- .../com/example/speech/SpeechAdaptation.java | 73 ------------------- .../example/speech/SpeechAdaptationTest.java | 55 -------------- 2 files changed, 128 deletions(-) delete mode 100644 speech/src/main/java/com/example/speech/SpeechAdaptation.java delete mode 100644 speech/src/test/java/com/example/speech/SpeechAdaptationTest.java diff --git a/speech/src/main/java/com/example/speech/SpeechAdaptation.java b/speech/src/main/java/com/example/speech/SpeechAdaptation.java deleted file mode 100644 index 4c51672d134..00000000000 --- 
a/speech/src/main/java/com/example/speech/SpeechAdaptation.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.speech; - -// [START speech_adaptation_beta] -import com.google.cloud.speech.v1p1beta1.RecognitionAudio; -import com.google.cloud.speech.v1p1beta1.RecognitionConfig; -import com.google.cloud.speech.v1p1beta1.RecognizeRequest; -import com.google.cloud.speech.v1p1beta1.RecognizeResponse; -import com.google.cloud.speech.v1p1beta1.SpeechClient; -import com.google.cloud.speech.v1p1beta1.SpeechContext; -import com.google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative; -import com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult; -import java.io.IOException; - -public class SpeechAdaptation { - - public void speechAdaptation() throws IOException { - String uriPath = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"; - speechAdaptation(uriPath); - } - - public static void speechAdaptation(String uriPath) throws IOException { - // Initialize client that will be used to send requests. This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources. 
- try (SpeechClient speechClient = SpeechClient.create()) { - - // Provides "hints" to the speech recognizer to favor specific words and phrases in the - // results. - // https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1p1beta1#google.cloud.speech.v1p1beta1.SpeechContext - SpeechContext speechContext = - SpeechContext.newBuilder().addPhrases("Brooklyn Bridge").setBoost(20.0F).build(); - // Configure recognition config to match your audio file. - RecognitionConfig config = - RecognitionConfig.newBuilder() - .setEncoding(RecognitionConfig.AudioEncoding.MP3) - .setSampleRateHertz(44100) - .setLanguageCode("en-US") - .addSpeechContexts(speechContext) - .build(); - // Set the path to your audio file - RecognitionAudio audio = RecognitionAudio.newBuilder().setUri(uriPath).build(); - - // Make the request - RecognizeRequest request = - RecognizeRequest.newBuilder().setConfig(config).setAudio(audio).build(); - - // Display the results - RecognizeResponse response = speechClient.recognize(request); - for (SpeechRecognitionResult result : response.getResultsList()) { - // First alternative is the most probable result - SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0); - System.out.printf("Transcript: %s\n", alternative.getTranscript()); - } - } - } -} -// [END speech_adaptation_beta] diff --git a/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java b/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java deleted file mode 100644 index a31b3637d5d..00000000000 --- a/speech/src/test/java/com/example/speech/SpeechAdaptationTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.speech; - -import static com.google.common.truth.Truth.assertThat; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class SpeechAdaptationTest { - private static final String AUDIO_FILE = "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"; - private ByteArrayOutputStream bout; - private PrintStream out; - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); - } - - @After - public void tearDown() { - System.setOut(null); - } - - @Test - public void testTranscribeContextClasses() throws IOException { - SpeechAdaptation.speechAdaptation(AUDIO_FILE); - String got = bout.toString(); - assertThat(got).contains("Transcript:"); - } -} From 338932546d9581a060ce995815207b1546277c77 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 2 Dec 2024 14:31:48 -0500 Subject: [PATCH 39/66] chore: decouple Otel from CallLabels (#9747) --- .../examples/proxy/core/CallLabels.java | 18 +--- .../examples/proxy/metrics/Metrics.java | 20 ++-- .../examples/proxy/metrics/MetricsImpl.java | 68 +++++++++---- .../examples/proxy/metrics/NoopMetrics.java | 21 ++-- .../examples/proxy/metrics/Tracer.java | 18 ++-- .../proxy/commands/ServeMetricsTest.java | 96 ++++++++++--------- 
.../examples/proxy/core/CallLabelsTest.java | 33 ------- .../proxy/metrics/MetricsImplTest.java | 84 ++++++++++++++++ 8 files changed, 224 insertions(+), 134 deletions(-) create mode 100644 bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java index 5eed055e35c..2657b4f47af 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -17,11 +17,9 @@ package com.google.cloud.bigtable.examples.proxy.core; import com.google.auto.value.AutoValue; -import com.google.cloud.bigtable.examples.proxy.metrics.MetricsImpl; import io.grpc.Metadata; import io.grpc.Metadata.Key; import io.grpc.MethodDescriptor; -import io.opentelemetry.api.common.Attributes; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.util.Optional; @@ -70,15 +68,13 @@ static ResourceName create(ResourceNameType type, String value) { } } - abstract Optional getApiClient(); + public abstract Optional getApiClient(); public abstract Optional getResourceName(); public abstract Optional getAppProfileId(); - abstract String getMethodName(); - - public abstract Attributes getOtelAttributes(); + public abstract String getMethodName(); public static CallLabels create(MethodDescriptor method, Metadata headers) { Optional apiClient = Optional.ofNullable(headers.get(API_CLIENT)); @@ -96,15 +92,9 @@ public static CallLabels create( Optional apiClient, Optional resourceName, Optional appProfile) { - Attributes otelAttrs = - Attributes.builder() - .put(MetricsImpl.API_CLIENT_KEY, apiClient.orElse("")) - .put(MetricsImpl.RESOURCE_KEY, resourceName.orElse("")) - 
.put(MetricsImpl.APP_PROFILE_KEY, appProfile.orElse("")) - .put(MetricsImpl.METHOD_KEY, method.getFullMethodName()) - .build(); + return new AutoValue_CallLabels( - apiClient, resourceName, appProfile, method.getFullMethodName(), otelAttrs); + apiClient, resourceName, appProfile, method.getFullMethodName()); } private static Optional extractResourceName(String[] encodedKvPairs) { diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index 484708a2701..3c8d5e9cb24 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -17,26 +17,30 @@ package com.google.cloud.bigtable.examples.proxy.metrics; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; import io.grpc.Status; import java.time.Duration; public interface Metrics { + MetricsAttributes createAttributes(CallLabels callLabels); - void recordCallStarted(CallLabels labels); + void recordCallStarted(MetricsAttributes attrs); - void recordCredLatency(CallLabels labels, Status status, Duration duration); + void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration); - void recordQueueLatency(CallLabels labels, Duration duration); + void recordQueueLatency(MetricsAttributes attrs, Duration duration); - void recordRequestSize(CallLabels labels, long size); + void recordRequestSize(MetricsAttributes attrs, long size); - void recordResponseSize(CallLabels labels, long size); + void recordResponseSize(MetricsAttributes attrs, long size); - void recordGfeLatency(CallLabels labels, Duration duration); + void recordGfeLatency(MetricsAttributes attrs, Duration duration); - void 
recordGfeHeaderMissing(CallLabels labels); + void recordGfeHeaderMissing(MetricsAttributes attrs); - void recordCallLatency(CallLabels labels, Status status, Duration duration); + void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration); void updateChannelCount(int delta); + + interface MetricsAttributes {} } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index c9304bcc5fc..bedc70e208a 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; import com.google.auth.Credentials; +import com.google.auto.value.AutoValue; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; @@ -28,6 +29,7 @@ import io.opentelemetry.api.metrics.LongHistogram; import io.opentelemetry.api.metrics.LongUpDownCounter; import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.metrics.SdkMeterProvider; @@ -56,7 +58,7 @@ public class MetricsImpl implements Closeable, Metrics { public static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; public static final String METRIC_PRESENCE_UNIT = "{process}"; - private final SdkMeterProvider meterProvider; + private final MeterProvider meterProvider; private final DoubleHistogram gfeLatency; private final LongCounter gfeResponseHeadersMissing; @@ -75,7 +77,11 @@ 
public class MetricsImpl implements Closeable, Metrics { () -> Resource.create(new GCPResourceProvider().getAttributes()); public MetricsImpl(Credentials credentials, String projectId) throws IOException { - meterProvider = createMeterProvider(credentials, projectId); + this(createMeterProvider(credentials, projectId)); + } + + MetricsImpl(MeterProvider meterProvider) { + this.meterProvider = meterProvider; Meter meter = meterProvider .meterBuilder(INSTRUMENTATION_SCOPE_INFO.getName()) @@ -172,8 +178,21 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException } @Override - public void close() { - meterProvider.close(); + public void close() throws IOException { + if (meterProvider instanceof Closeable) { + ((Closeable) meterProvider).close(); + } + } + + @Override + public MetricsAttributesImpl createAttributes(CallLabels callLabels) { + return new AutoValue_MetricsImpl_MetricsAttributesImpl( + Attributes.builder() + .put(MetricsImpl.API_CLIENT_KEY, callLabels.getApiClient().orElse("")) + .put(MetricsImpl.RESOURCE_KEY, callLabels.getResourceName().orElse("")) + .put(MetricsImpl.APP_PROFILE_KEY, callLabels.getAppProfileId().orElse("")) + .put(MetricsImpl.METHOD_KEY, callLabels.getMethodName()) + .build()); } private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) { @@ -194,49 +213,49 @@ private static SdkMeterProvider createMeterProvider(Credentials credentials, Str } @Override - public void recordCallStarted(CallLabels labels) { - serverCallsStarted.add(1, labels.getOtelAttributes()); + public void recordCallStarted(MetricsAttributes attrs) { + serverCallsStarted.add(1, unwrap(attrs)); int outstanding = numOutstandingRpcs.incrementAndGet(); maxSeen.updateAndGet(n -> Math.max(outstanding, n)); } @Override - public void recordCredLatency(CallLabels labels, Status status, Duration duration) { + public void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration) { Attributes 
attributes = - labels.getOtelAttributes().toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); clientCredLatencies.record(duration.toMillis(), attributes); } @Override - public void recordQueueLatency(CallLabels labels, Duration duration) { - clientQueueLatencies.record(duration.toMillis(), labels.getOtelAttributes()); + public void recordQueueLatency(MetricsAttributes attrs, Duration duration) { + clientQueueLatencies.record(duration.toMillis(), unwrap(attrs)); } @Override - public void recordRequestSize(CallLabels labels, long size) { - requestSizes.record(size, labels.getOtelAttributes()); + public void recordRequestSize(MetricsAttributes attrs, long size) { + requestSizes.record(size, unwrap(attrs)); } @Override - public void recordResponseSize(CallLabels labels, long size) { - responseSizes.record(size, labels.getOtelAttributes()); + public void recordResponseSize(MetricsAttributes attrs, long size) { + responseSizes.record(size, unwrap(attrs)); } @Override - public void recordGfeLatency(CallLabels labels, Duration duration) { - gfeLatency.record(duration.toMillis(), labels.getOtelAttributes()); + public void recordGfeLatency(MetricsAttributes attrs, Duration duration) { + gfeLatency.record(duration.toMillis(), unwrap(attrs)); } @Override - public void recordGfeHeaderMissing(CallLabels labels) { - gfeResponseHeadersMissing.add(1, labels.getOtelAttributes()); + public void recordGfeHeaderMissing(MetricsAttributes attrs) { + gfeResponseHeadersMissing.add(1, unwrap(attrs)); } @Override - public void recordCallLatency(CallLabels labels, Status status, Duration duration) { + public void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration) { Attributes attributes = - labels.getOtelAttributes().toBuilder().put(STATUS_KEY, status.getCode().name()).build(); + unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); 
clientCallLatencies.record(duration.toMillis(), attributes); numOutstandingRpcs.decrementAndGet(); @@ -246,4 +265,13 @@ public void recordCallLatency(CallLabels labels, Status status, Duration duratio public void updateChannelCount(int delta) { channelCounter.add(delta); } + + static Attributes unwrap(MetricsAttributes wrapped) { + return ((MetricsAttributesImpl) wrapped).getAttributes(); + } + + @AutoValue + abstract static class MetricsAttributesImpl implements MetricsAttributes { + abstract Attributes getAttributes(); + } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index 88a85b6388a..7da029f9ba1 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -23,28 +23,33 @@ public class NoopMetrics implements Metrics { @Override - public void recordCallStarted(CallLabels labels) {} + public MetricsAttributes createAttributes(CallLabels callLabels) { + return null; + } @Override - public void recordCredLatency(CallLabels labels, Status status, Duration duration) {} + public void recordCallStarted(MetricsAttributes attrs) {} @Override - public void recordQueueLatency(CallLabels labels, Duration duration) {} + public void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration) {} @Override - public void recordRequestSize(CallLabels labels, long size) {} + public void recordQueueLatency(MetricsAttributes attrs, Duration duration) {} @Override - public void recordResponseSize(CallLabels labels, long size) {} + public void recordRequestSize(MetricsAttributes attrs, long size) {} @Override - public void recordGfeLatency(CallLabels labels, Duration duration) {} + public void 
recordResponseSize(MetricsAttributes attrs, long size) {} @Override - public void recordGfeHeaderMissing(CallLabels labels) {} + public void recordGfeLatency(MetricsAttributes attrs, Duration duration) {} @Override - public void recordCallLatency(CallLabels labels, Status status, Duration duration) {} + public void recordGfeHeaderMissing(MetricsAttributes attrs) {} + + @Override + public void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration) {} @Override public void updateChannelCount(int delta) {} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index eadb9847eef..a14ba9ece5e 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; import com.google.common.base.Stopwatch; import io.grpc.CallOptions; import io.grpc.CallOptions.Key; @@ -40,6 +41,7 @@ public class Tracer extends ClientStreamTracer { private final Metrics metrics; private final CallLabels callLabels; + private final MetricsAttributes attrs; private final Stopwatch stopwatch; private volatile Optional grpcQueueDuration = Optional.empty(); private final AtomicLong responseSize = new AtomicLong(); @@ -47,10 +49,11 @@ public class Tracer extends ClientStreamTracer { public Tracer(Metrics metrics, CallLabels callLabels) { this.metrics = metrics; this.callLabels = callLabels; + this.attrs = metrics.createAttributes(callLabels); stopwatch = Stopwatch.createStarted(); - metrics.recordCallStarted(callLabels); + metrics.recordCallStarted(attrs); 
} public CallOptions injectIntoCallOptions(CallOptions callOptions) { @@ -77,7 +80,7 @@ public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalU @Override public void outboundUncompressedSize(long bytes) { - metrics.recordRequestSize(callLabels, bytes); + metrics.recordRequestSize(attrs, bytes); } @Override @@ -94,19 +97,18 @@ public void inboundHeaders(Metadata headers) { .map(Long::parseLong) .map(Duration::ofMillis) .ifPresentOrElse( - d -> metrics.recordGfeLatency(callLabels, d), - () -> metrics.recordGfeHeaderMissing(callLabels)); + d -> metrics.recordGfeLatency(attrs, d), () -> metrics.recordGfeHeaderMissing(attrs)); } public void onCallFinished(Status status) { - grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(callLabels, d)); - metrics.recordResponseSize(callLabels, responseSize.get()); + grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(attrs, d)); + metrics.recordResponseSize(attrs, responseSize.get()); metrics.recordCallLatency( - callLabels, status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + attrs, status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); } public void onCredentialsFetch(Status status, Duration duration) { - metrics.recordCredLatency(callLabels, status, duration); + metrics.recordCredLatency(attrs, status, duration); } public CallLabels getCallLabels() { diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java index 66318cd57cf..bacf6a1b6ba 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -21,6 +21,7 @@ import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.verify; import com.google.auth.Credentials; @@ -31,6 +32,7 @@ import com.google.bigtable.v2.CheckAndMutateRowResponse; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; +import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; import com.google.common.collect.Lists; import io.grpc.CallOptions; import io.grpc.Channel; @@ -71,7 +73,6 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; @@ -203,6 +204,9 @@ public void start(Listener responseListener, Metadata headers) { } }); + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); doAnswer( invocation -> { Thread.sleep(10); @@ -217,7 +221,7 @@ public void start(Listener responseListener, Metadata headers) { return invocation.callRealMethod(); }) .when(fakeCredentials) - .getRequestMetadata(Mockito.any()); + .getRequestMetadata(any()); CheckAndMutateRowRequest request = CheckAndMutateRowRequest.newBuilder() @@ -225,24 +229,22 @@ public void start(Listener responseListener, Metadata headers) { .build(); CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); - CallLabels expectedLabels = - CallLabels.create( - BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")); - - verify(mockMetrics).recordCallStarted(eq(expectedLabels)); - verify(mockMetrics) - .recordCredLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(10))); - 
verify(mockMetrics).recordGfeLatency(eq(expectedLabels), eq(Duration.ofMillis(1234))); - verify(mockMetrics).recordQueueLatency(eq(expectedLabels), geq(Duration.ZERO)); verify(mockMetrics) - .recordRequestSize(eq(expectedLabels), eq((long) request.getSerializedSize())); - verify(mockMetrics) - .recordResponseSize(eq(expectedLabels), eq((long) response.getSerializedSize())); - verify(mockMetrics) - .recordCallLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(20))); + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")))); + + verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); + verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordGfeLatency(eq(fakeAttrs), eq(Duration.ofMillis(1234))); + verify(mockMetrics).recordQueueLatency(eq(fakeAttrs), geq(Duration.ZERO)); + verify(mockMetrics).recordRequestSize(eq(fakeAttrs), eq((long) request.getSerializedSize())); + verify(mockMetrics).recordResponseSize(eq(fakeAttrs), eq((long) response.getSerializedSize())); + verify(mockMetrics).recordCallLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(20))); } @Test @@ -276,25 +278,30 @@ public void start(Listener responseListener, Metadata headers) { } }); + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); + CheckAndMutateRowRequest request = CheckAndMutateRowRequest.newBuilder() .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") .build(); CheckAndMutateRowResponse response = stub.checkAndMutateRow(request); - CallLabels expectedLabels = - CallLabels.create( - BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - 
Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")); - - verify(mockMetrics).recordGfeHeaderMissing(eq(expectedLabels)); + verify(mockMetrics) + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")))); + + verify(mockMetrics).recordGfeHeaderMissing(eq(fakeAttrs)); } @Test public void testError() throws IOException { - BigtableBlockingStub stub = + final BigtableBlockingStub stub = BigtableGrpc.newBlockingStub(proxyChannel) .withInterceptors( new ClientInterceptor() { @@ -329,7 +336,7 @@ public void start(Listener responseListener, Metadata headers) { return invocation.callRealMethod(); }) .when(fakeCredentials) - .getRequestMetadata(Mockito.any()); + .getRequestMetadata(any()); doAnswer( invocation -> { @@ -342,28 +349,31 @@ public void start(Listener responseListener, Metadata headers) { .when(dataService) .checkAndMutateRow(any(), any()); + MetricsAttributes fakeAttrs = new MetricsAttributes() {}; + doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); + CheckAndMutateRowRequest request = CheckAndMutateRowRequest.newBuilder() .setTableName("project/fake-project/instances/fake-instance/tables/fake-table") .build(); assertThrows(StatusRuntimeException.class, () -> stub.checkAndMutateRow(request)); - CallLabels expectedLabels = - CallLabels.create( - BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")); - - verify(mockMetrics).recordCallStarted(eq(expectedLabels)); - verify(mockMetrics) - .recordCredLatency(eq(expectedLabels), eq(Status.OK), geq(Duration.ofMillis(10))); - verify(mockMetrics).recordQueueLatency(eq(expectedLabels), geq(Duration.ZERO)); verify(mockMetrics) - 
.recordRequestSize(eq(expectedLabels), eq((long) request.getSerializedSize())); - verify(mockMetrics).recordResponseSize(eq(expectedLabels), eq(0L)); + .createAttributes( + eq( + CallLabels.create( + BigtableGrpc.getCheckAndMutateRowMethod(), + Optional.of("fake-client"), + Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), + Optional.of("fake-app-profile")))); + + verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); + verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); + verify(mockMetrics).recordQueueLatency(eq(fakeAttrs), geq(Duration.ZERO)); + verify(mockMetrics).recordRequestSize(eq(fakeAttrs), eq((long) request.getSerializedSize())); + verify(mockMetrics).recordResponseSize(eq(fakeAttrs), eq(0L)); verify(mockMetrics) - .recordCallLatency(eq(expectedLabels), eq(Status.INTERNAL), geq(Duration.ofMillis(20))); + .recordCallLatency(eq(fakeAttrs), eq(Status.INTERNAL), geq(Duration.ofMillis(20))); } static class MetadataInterceptor implements ServerInterceptor { diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java index 23b071fe96c..30fbee8f4b8 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java @@ -21,11 +21,9 @@ import com.google.bigtable.v2.BigtableGrpc; import com.google.common.truth.FailureMetadata; -import com.google.common.truth.MapSubject; import com.google.common.truth.Subject; import io.grpc.Metadata; import io.grpc.Metadata.Key; -import io.opentelemetry.api.common.AttributeKey; import java.util.Optional; import org.jspecify.annotations.Nullable; import org.junit.Test; @@ -50,14 +48,6 @@ public void testAllBasic() { 
assertThat(callLabels.getAppProfileId()).isEqualTo(Optional.of("a")); assertThat(callLabels.getResourceName()) .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); - - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - .containsAtLeast( - AttributeKey.stringKey("api_client"), "some-client", - AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t", - AttributeKey.stringKey("app_profile"), "a", - AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); } @Test @@ -68,9 +58,6 @@ public void testResourceEscaped() { assertThat(callLabels.getResourceName()) .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - .containsAtLeast(AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t"); } @Test @@ -79,13 +66,6 @@ public void testEmpty() { CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - .containsAtLeast( - AttributeKey.stringKey("api_client"), "", - AttributeKey.stringKey("resource"), "", - AttributeKey.stringKey("app_profile"), "", - AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); } @Test @@ -95,9 +75,6 @@ public void testMalformed1() { CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - .containsAtLeast(AttributeKey.stringKey("resource"), ""); } @Test @@ -107,9 +84,6 @@ public void testMalformed2() { CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - 
.containsAtLeast(AttributeKey.stringKey("resource"), ""); } @Test @@ -119,9 +93,6 @@ public void testMalformed3() { CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); - CallLabelsSubject.assertThat(callLabels) - .hasOtelAttributesThat() - .containsAtLeast(AttributeKey.stringKey("resource"), ""); } private static class CallLabelsSubject extends Subject { @@ -140,10 +111,6 @@ public static CallLabelsSubject assertThat(CallLabels callLabels) { return assertAbout(callLabels()).that(callLabels); } - public MapSubject hasOtelAttributesThat() { - return check("getOtelAttributes()").that(actual.getOtelAttributes().asMap()); - } - public void hasMethodName(String method) { check("getMethodName()").that(actual.getMethodName()).isEqualTo(method); } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java new file mode 100644 index 00000000000..59f66d44148 --- /dev/null +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.examples.proxy.metrics; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.bigtable.v2.BigtableGrpc; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.MeterProvider; +import java.util.Optional; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.Answers; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +@RunWith(JUnit4.class) +public class MetricsImplTest { + @Rule public final MockitoRule mockitoTestRule = MockitoJUnit.rule(); + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + MeterProvider mockMeterProvider; + + private MetricsImpl metrics; + + @Before + public void setUp() throws Exception { + metrics = new MetricsImpl(mockMeterProvider); + } + + @Test + public void testBasic() { + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.of("some-client"), + Optional.of("projects/p/instances/i/tables/t"), + Optional.of("a")); + Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); + assertThat(attrs.asMap()) + .containsAtLeast( + AttributeKey.stringKey("api_client"), "some-client", + AttributeKey.stringKey("resource"), "projects/p/instances/i/tables/t", + AttributeKey.stringKey("app_profile"), "a", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } + + @Test + public void testMissing() { + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.empty(), + Optional.empty(), + Optional.empty()); + Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); + assertThat(attrs.asMap()) + .containsAtLeast( + AttributeKey.stringKey("api_client"), "", + 
AttributeKey.stringKey("resource"), "", + AttributeKey.stringKey("app_profile"), "", + AttributeKey.stringKey("method"), "google.bigtable.v2.Bigtable/MutateRow"); + } +} From da1b9e5c2fb9b4b43e37e13a03b4a15d05d631d4 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Fri, 29 Nov 2024 11:01:54 -0500 Subject: [PATCH 40/66] chore: introduce PrimingKey to encapsulate PingAndWarm request & metadata --- .../proxy/channelpool/DataChannel.java | 31 +++----- .../proxy/channelpool/ResourceCollector.java | 38 ++++------ .../examples/proxy/core/CallLabels.java | 70 +++++++++++++++++++ 3 files changed, 95 insertions(+), 44 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index 746bedde821..7eba4e7bb4a 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -20,6 +20,7 @@ import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; import com.google.common.util.concurrent.ListenableFuture; @@ -37,8 +38,6 @@ import io.grpc.MethodDescriptor; import io.grpc.Status; import io.grpc.StatusRuntimeException; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -107,18 +106,18 @@ private void warmQuietly() { } private void warm() { - List requests = resourceCollector.getRequests(); - if 
(requests.isEmpty()) { + List primingKeys = resourceCollector.getPrimingKeys(); + if (primingKeys.isEmpty()) { return; } List> futures = - requests.stream().map(this::sendPingAndWarm).collect(Collectors.toList()); + primingKeys.stream().map(this::sendPingAndWarm).collect(Collectors.toList()); int successCount = 0; int failures = 0; for (ListenableFuture future : futures) { - PingAndWarmRequest request = requests.get(successCount + failures); + PrimingKey request = primingKeys.get(successCount + failures); try { future.get(); successCount++; @@ -151,13 +150,11 @@ private void warm() { } } - private ListenableFuture sendPingAndWarm(PingAndWarmRequest request) { - CallLabels callLabels = - CallLabels.create( - BigtableGrpc.getPingAndWarmMethod(), - Optional.of("bigtableproxy"), - Optional.of(request.getName()), - Optional.of(request.getAppProfileId())); + private ListenableFuture sendPingAndWarm(PrimingKey primingKey) { + Metadata metadata = primingKey.composeMetadata(); + PingAndWarmRequest request = primingKey.composeProto(); + + CallLabels callLabels = CallLabels.create(BigtableGrpc.getPingAndWarmMethod(), metadata); Tracer tracer = new Tracer(metrics, callLabels); CallOptions callOptions = @@ -169,14 +166,6 @@ private ListenableFuture sendPingAndWarm(PingAndWarmRequest ClientCall call = inner.newCall(BigtableGrpc.getPingAndWarmMethod(), callOptions); - Metadata metadata = new Metadata(); - metadata.put( - CallLabels.REQUEST_PARAMS, - String.format( - "name=%s&app_profile_id=%s", - URLEncoder.encode(request.getName(), StandardCharsets.UTF_8), - URLEncoder.encode(request.getAppProfileId(), StandardCharsets.UTF_8))); - SettableFuture f = SettableFuture.create(); call.start( new Listener<>() { diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java index c5b529ab06e..b9c492054f8 
100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java @@ -16,44 +16,36 @@ package com.google.cloud.bigtable.examples.proxy.channelpool; -import com.google.bigtable.v2.PingAndWarmRequest; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableList; import java.time.Duration; import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ResourceCollector { - private final Cache warmingRequests = + private static final Logger LOG = LoggerFactory.getLogger(ResourceCollector.class); + + private final Cache primingKeys = CacheBuilder.newBuilder().expireAfterWrite(Duration.ofHours(1)).maximumSize(100).build(); public void collect(CallLabels labels) { - String[] splits = labels.getResourceName().orElse("").split("/", 5); - if (splits.length <= 4) { - return; - } - if (!"projects".equals(splits[0])) { - return; + try { + PrimingKey.from(labels).ifPresent(k -> primingKeys.put(k, true)); + } catch (ParsingException e) { + LOG.atWarn().log("Failed to collect priming request for {}", labels, e); } - if (!"instances".equals(splits[2])) { - return; - } - String appProfile = labels.getAppProfileId().orElse(""); - - PingAndWarmRequest req = - PingAndWarmRequest.newBuilder() - .setName("projects/" + splits[1] + "/instances/" + splits[3]) - .setAppProfileId(appProfile) - .build(); - warmingRequests.put(req, true); } - public List getRequests() { - return ImmutableList.copyOf(warmingRequests.asMap().keySet()); + public List getPrimingKeys() { + return 
ImmutableList.copyOf(primingKeys.asMap().keySet()); } - public void evict(PingAndWarmRequest request) { - warmingRequests.invalidate(request); + public void evict(PrimingKey request) { + primingKeys.invalidate(request); } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java index 2657b4f47af..4f8bdcd65dc 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -17,11 +17,17 @@ package com.google.cloud.bigtable.examples.proxy.core; import com.google.auto.value.AutoValue; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.bigtable.v2.PingAndWarmRequest.Builder; +import com.google.common.collect.ImmutableMap; import io.grpc.Metadata; import io.grpc.Metadata.Key; import io.grpc.MethodDescriptor; import java.net.URLDecoder; +import java.net.URLEncoder; import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.Map.Entry; import java.util.Optional; /** @@ -155,4 +161,68 @@ private static Optional extractAppProfileId(String[] encodedKvPairs) { private static String percentDecode(String s) { return URLDecoder.decode(s, StandardCharsets.UTF_8); } + + @AutoValue + public abstract static class PrimingKey { + abstract Map getMetadata(); + + abstract String getName(); + + abstract Optional getAppProfileId(); + + public static Optional from(CallLabels labels) throws ParsingException { + Optional resourceName = labels.getResourceName(); + if (resourceName.isEmpty()) { + return Optional.empty(); + } + String[] resourceNameParts = resourceName.get().split("/", 5); + if (resourceNameParts.length < 4 + || !resourceNameParts[0].equals("projects") + || !resourceNameParts[2].equals("instances")) { + return 
Optional.empty(); + } + String instanceName = + "projects/" + resourceNameParts[1] + "/instances/" + resourceNameParts[3]; + StringBuilder reqParams = + new StringBuilder() + .append("name=") + .append(URLEncoder.encode(instanceName, StandardCharsets.UTF_8)); + + Optional appProfileId = labels.getAppProfileId(); + appProfileId.ifPresent(val -> reqParams.append("&app_profile_id=").append(val)); + + ImmutableMap.Builder md = ImmutableMap.builder(); + md.put(REQUEST_PARAMS.name(), reqParams.toString()); + + labels.getApiClient().ifPresent(c -> md.put(API_CLIENT.name(), c)); + + return Optional.of( + new AutoValue_CallLabels_PrimingKey(md.build(), instanceName, appProfileId)); + } + + public Metadata composeMetadata() { + Metadata md = new Metadata(); + for (Entry e : getMetadata().entrySet()) { + md.put(Key.of(e.getKey(), Metadata.ASCII_STRING_MARSHALLER), e.getValue()); + } + return md; + } + + public PingAndWarmRequest composeProto() { + Builder builder = PingAndWarmRequest.newBuilder().setName(getName()); + getAppProfileId().ifPresent(builder::setAppProfileId); + return builder.build(); + } + } + + public static class ParsingException extends Exception { + + public ParsingException(String message) { + super(message); + } + + public ParsingException(String message, Throwable cause) { + super(message, cause); + } + } } From 7c4ce969b37ff76935dcbb8d7df9da894718f7ba Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 2 Dec 2024 15:06:25 -0500 Subject: [PATCH 41/66] chore: support the rest of RLS headers for priming (#9749) NOTE: this depends on and includes #9748 --- .../examples/proxy/core/CallLabels.java | 98 ++++++++--- .../examples/proxy/metrics/MetricsImpl.java | 35 +++- .../proxy/commands/ServeMetricsTest.java | 156 +++++++++--------- .../examples/proxy/core/CallLabelsTest.java | 145 ++++++++++------ .../proxy/metrics/MetricsImplTest.java | 11 +- 5 files changed, 280 insertions(+), 165 deletions(-) diff --git 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java index 4f8bdcd65dc..c15448ad717 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -19,6 +19,7 @@ import com.google.auto.value.AutoValue; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmRequest.Builder; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import io.grpc.Metadata; import io.grpc.Metadata.Key; @@ -29,6 +30,8 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A value class to encapsulate call identity. @@ -43,9 +46,18 @@ */ @AutoValue public abstract class CallLabels { - public static final Key REQUEST_PARAMS = + private static final Logger LOG = LoggerFactory.getLogger(CallLabels.class); + + // All RLS headers + static final Key REQUEST_PARAMS = Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); - private static final Key API_CLIENT = + static final Key LEGACY_RESOURCE_PREFIX = + Key.of("google-cloud-resource-prefix", Metadata.ASCII_STRING_MARSHALLER); + static final Key ROUTING_COOKIE = + Key.of("x-goog-cbt-cookie-routing", Metadata.ASCII_STRING_MARSHALLER); + static final Key FEATURE_FLAGS = + Key.of("bigtable-features", Metadata.ASCII_STRING_MARSHALLER); + static final Key API_CLIENT = Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); enum ResourceNameType { @@ -74,36 +86,56 @@ static ResourceName create(ResourceNameType type, String value) { } } - public abstract Optional getApiClient(); + public abstract String getMethodName(); - public abstract Optional 
getResourceName(); + abstract Optional getRequestParams(); - public abstract Optional getAppProfileId(); + abstract Optional getLegacyResourcePrefix(); - public abstract String getMethodName(); + abstract Optional getRoutingCookie(); + + abstract Optional getEncodedFeatures(); + + public abstract Optional getApiClient(); public static CallLabels create(MethodDescriptor method, Metadata headers) { Optional apiClient = Optional.ofNullable(headers.get(API_CLIENT)); - String requestParams = Optional.ofNullable(headers.get(REQUEST_PARAMS)).orElse(""); - String[] encodedKvPairs = requestParams.split("&"); - Optional resourceName = extractResourceName(encodedKvPairs).map(ResourceName::getValue); - Optional appProfile = extractAppProfileId(encodedKvPairs); + Optional requestParams = Optional.ofNullable(headers.get(REQUEST_PARAMS)); + Optional legacyResourcePrefix = + Optional.ofNullable(headers.get(LEGACY_RESOURCE_PREFIX)); + Optional routingCookie = Optional.ofNullable(headers.get(ROUTING_COOKIE)); + Optional encodedFeatures = Optional.ofNullable(headers.get(FEATURE_FLAGS)); - return create(method, apiClient, resourceName, appProfile); + return create( + method, requestParams, legacyResourcePrefix, routingCookie, encodedFeatures, apiClient); } + @VisibleForTesting public static CallLabels create( MethodDescriptor method, - Optional apiClient, - Optional resourceName, - Optional appProfile) { + Optional requestParams, + Optional legacyResourcePrefix, + Optional routingCookie, + Optional encodedFeatures, + Optional apiClient) { return new AutoValue_CallLabels( - apiClient, resourceName, appProfile, method.getFullMethodName()); + method.getFullMethodName(), + requestParams, + legacyResourcePrefix, + routingCookie, + encodedFeatures, + apiClient); } - private static Optional extractResourceName(String[] encodedKvPairs) { + public Optional extractResourceName() throws ParsingException { + if (getRequestParams().isEmpty()) { + return getLegacyResourcePrefix(); + } + + String 
requestParams = getRequestParams().orElse(""); + String[] encodedKvPairs = requestParams.split("&"); Optional resourceName = Optional.empty(); for (String encodedKv : encodedKvPairs) { @@ -132,10 +164,10 @@ private static Optional extractResourceName(String[] encodedKvPair resourceName = Optional.of(ResourceName.create(newType.get(), decodedValue)); } - return resourceName; + return resourceName.map(ResourceName::getValue); } - private static Optional findType(String encodedKey) { + private static Optional findType(String encodedKey) throws ParsingException { String decodedKey = percentDecode(encodedKey); for (ResourceNameType type : ResourceNameType.values()) { @@ -146,8 +178,10 @@ private static Optional findType(String encodedKey) { return Optional.empty(); } - private static Optional extractAppProfileId(String[] encodedKvPairs) { - for (String encodedPair : encodedKvPairs) { + public Optional extractAppProfileId() throws ParsingException { + String requestParams = getRequestParams().orElse(""); + + for (String encodedPair : requestParams.split("&")) { if (!encodedPair.startsWith("app_profile_id=")) { continue; } @@ -158,8 +192,12 @@ private static Optional extractAppProfileId(String[] encodedKvPairs) { return Optional.empty(); } - private static String percentDecode(String s) { - return URLDecoder.decode(s, StandardCharsets.UTF_8); + private static String percentDecode(String s) throws ParsingException { + try { + return URLDecoder.decode(s, StandardCharsets.UTF_8); + } catch (RuntimeException e) { + throw new ParsingException("Failed to url decode " + s, e); + } } @AutoValue @@ -171,7 +209,9 @@ public abstract static class PrimingKey { abstract Optional getAppProfileId(); public static Optional from(CallLabels labels) throws ParsingException { - Optional resourceName = labels.getResourceName(); + final ImmutableMap.Builder md = ImmutableMap.builder(); + + Optional resourceName = labels.extractResourceName(); if (resourceName.isEmpty()) { return 
Optional.empty(); } @@ -188,12 +228,18 @@ public static Optional from(CallLabels labels) throws ParsingExcepti .append("name=") .append(URLEncoder.encode(instanceName, StandardCharsets.UTF_8)); - Optional appProfileId = labels.getAppProfileId(); + Optional appProfileId = labels.extractAppProfileId(); appProfileId.ifPresent(val -> reqParams.append("&app_profile_id=").append(val)); - - ImmutableMap.Builder md = ImmutableMap.builder(); md.put(REQUEST_PARAMS.name(), reqParams.toString()); + labels + .getLegacyResourcePrefix() + .ifPresent(ignored -> md.put(LEGACY_RESOURCE_PREFIX.name(), instanceName)); + + labels.getRoutingCookie().ifPresent(c -> md.put(ROUTING_COOKIE.name(), c)); + + labels.getEncodedFeatures().ifPresent(c -> md.put(FEATURE_FLAGS.name(), c)); + labels.getApiClient().ifPresent(c -> md.put(API_CLIENT.name(), c)); return Optional.of( diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index bedc70e208a..27ae626f271 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -19,11 +19,13 @@ import com.google.auth.Credentials; import com.google.auto.value.AutoValue; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; import io.grpc.Status; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.metrics.DoubleHistogram; import io.opentelemetry.api.metrics.LongCounter; 
import io.opentelemetry.api.metrics.LongHistogram; @@ -41,8 +43,12 @@ import java.time.Duration; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class MetricsImpl implements Closeable, Metrics { + private static final Logger LOG = LoggerFactory.getLogger(MetricsImpl.class); + public static final InstrumentationScopeInfo INSTRUMENTATION_SCOPE_INFO = InstrumentationScopeInfo.create("bigtable-proxy"); @@ -186,13 +192,30 @@ public void close() throws IOException { @Override public MetricsAttributesImpl createAttributes(CallLabels callLabels) { - return new AutoValue_MetricsImpl_MetricsAttributesImpl( + AttributesBuilder attrs = Attributes.builder() - .put(MetricsImpl.API_CLIENT_KEY, callLabels.getApiClient().orElse("")) - .put(MetricsImpl.RESOURCE_KEY, callLabels.getResourceName().orElse("")) - .put(MetricsImpl.APP_PROFILE_KEY, callLabels.getAppProfileId().orElse("")) - .put(MetricsImpl.METHOD_KEY, callLabels.getMethodName()) - .build()); + .put(METHOD_KEY, callLabels.getMethodName()) + .put(API_CLIENT_KEY, callLabels.getApiClient().orElse("")); + + String resourceValue; + try { + resourceValue = callLabels.extractResourceName().orElse(""); + } catch (ParsingException e) { + LOG.atWarn().log("Failed to extract resource from callLabels: {}", callLabels, e); + resourceValue = ""; + } + attrs.put(MetricsImpl.RESOURCE_KEY, resourceValue); + + String appProfile; + try { + appProfile = callLabels.extractAppProfileId().orElse(""); + } catch (ParsingException e) { + LOG.atWarn().log("Failed to extract app profile from callLabels: {}", callLabels, e); + appProfile = ""; + } + attrs.put(MetricsImpl.APP_PROFILE_KEY, appProfile); + + return new AutoValue_MetricsImpl_MetricsAttributesImpl(attrs.build()); } private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) { diff --git 
a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java index bacf6a1b6ba..23479c25b90 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/commands/ServeMetricsTest.java @@ -33,6 +33,7 @@ import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import io.grpc.CallOptions; import io.grpc.Channel; @@ -61,6 +62,7 @@ import java.time.Duration; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingDeque; @@ -178,31 +180,15 @@ public void testHappyPath() throws IOException { BigtableBlockingStub stub = BigtableGrpc.newBlockingStub(proxyChannel) .withInterceptors( - new ClientInterceptor() { - @Override - public ClientCall interceptCall( - MethodDescriptor methodDescriptor, - CallOptions callOptions, - Channel channel) { - return new SimpleForwardingClientCall<>( - channel.newCall(methodDescriptor, callOptions)) { - @Override - public void start(Listener responseListener, Metadata headers) { - // inject call labels - headers.put( - Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), - String.format( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", - "fake-project", "fake-instance", "fake-table", "fake-app-profile")); - headers.put( - 
Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER), - "fake-client"); - - super.start(responseListener, headers); - } - }; - } - }); + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); MetricsAttributes fakeAttrs = new MetricsAttributes() {}; @@ -234,9 +220,15 @@ public void start(Listener responseListener, Metadata headers) { eq( CallLabels.create( BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")))); + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); @@ -252,31 +244,15 @@ public void testMissingGfe() throws IOException { BigtableBlockingStub stub = BigtableGrpc.newBlockingStub(proxyChannel) .withInterceptors( - new ClientInterceptor() { - @Override - public ClientCall interceptCall( - MethodDescriptor methodDescriptor, - CallOptions callOptions, - Channel channel) { - return new SimpleForwardingClientCall<>( - channel.newCall(methodDescriptor, callOptions)) { - @Override - public void start(Listener responseListener, Metadata headers) { - // inject call labels - headers.put( - Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), - String.format( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", - "fake-project", "fake-instance", "fake-table", "fake-app-profile")); - headers.put( - Key.of("x-goog-api-client", 
Metadata.ASCII_STRING_MARSHALLER), - "fake-client"); - - super.start(responseListener, headers); - } - }; - } - }); + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); MetricsAttributes fakeAttrs = new MetricsAttributes() {}; doReturn(fakeAttrs).when(mockMetrics).createAttributes(any()); @@ -292,9 +268,15 @@ public void start(Listener responseListener, Metadata headers) { eq( CallLabels.create( BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")))); + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); verify(mockMetrics).recordGfeHeaderMissing(eq(fakeAttrs)); } @@ -304,31 +286,15 @@ public void testError() throws IOException { final BigtableBlockingStub stub = BigtableGrpc.newBlockingStub(proxyChannel) .withInterceptors( - new ClientInterceptor() { - @Override - public ClientCall interceptCall( - MethodDescriptor methodDescriptor, - CallOptions callOptions, - Channel channel) { - return new SimpleForwardingClientCall<>( - channel.newCall(methodDescriptor, callOptions)) { - @Override - public void start(Listener responseListener, Metadata headers) { - // inject call labels - headers.put( - Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), - String.format( + new OutgoingMetadataInterceptor( + ImmutableMap.of( + "x-goog-request-params", + String.format( "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", - "fake-project", "fake-instance", "fake-table", "fake-app-profile")); - headers.put( - Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER), - "fake-client"); - - 
super.start(responseListener, headers); - } - }; - } - }); + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F"), + "x-goog-api-client", + "fake-client"))); doAnswer( invocation -> { @@ -363,9 +329,15 @@ public void start(Listener responseListener, Metadata headers) { eq( CallLabels.create( BigtableGrpc.getCheckAndMutateRowMethod(), - Optional.of("fake-client"), - Optional.of("projects/fake-project/instances/fake-instance/tables/fake-table"), - Optional.of("fake-app-profile")))); + Optional.of( + String.format( + "table_name=projects/%s/instances/%s/tables/%s&app_profile_id=%s", + "fake-project", "fake-instance", "fake-table", "fake-profile") + .replaceAll("/", "%2F")), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of("fake-client")))); verify(mockMetrics).recordCallStarted(eq(fakeAttrs)); verify(mockMetrics).recordCredLatency(eq(fakeAttrs), eq(Status.OK), geq(Duration.ofMillis(10))); @@ -444,4 +416,26 @@ public void refresh() throws IOException { // noop } } + + private static class OutgoingMetadataInterceptor implements ClientInterceptor { + private final Map metadata; + + private OutgoingMetadataInterceptor(Map metadata) { + this.metadata = metadata; + } + + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, CallOptions callOptions, Channel channel) { + return new SimpleForwardingClientCall<>(channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + for (Entry entry : metadata.entrySet()) { + headers.put(Key.of(entry.getKey(), Metadata.ASCII_STRING_MARSHALLER), entry.getValue()); + } + super.start(responseListener, headers); + } + }; + } + } } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java index 30fbee8f4b8..c17278c2e8d 100644 
--- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/core/CallLabelsTest.java @@ -16,109 +16,154 @@ package com.google.cloud.bigtable.examples.proxy.core; -import static com.google.common.truth.Truth.assertAbout; import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; import com.google.bigtable.v2.BigtableGrpc; -import com.google.common.truth.FailureMetadata; -import com.google.common.truth.Subject; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; import io.grpc.Metadata; -import io.grpc.Metadata.Key; import java.util.Optional; -import org.jspecify.annotations.Nullable; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @RunWith(JUnit4.class) public class CallLabelsTest { - private static final Key REQUEST_PARAMS = - Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER); - private static final Key API_CLIENT = - Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER); - @Test - public void testAllBasic() { + public void testAllBasic() throws ParsingException { Metadata md = new Metadata(); - md.put(REQUEST_PARAMS, "table_name=projects/p/instances/i/tables/t&app_profile_id=a"); - md.put(API_CLIENT, "some-client"); + md.put( + CallLabels.REQUEST_PARAMS, + "table_name=projects/p/instances/i/tables/t&app_profile_id=a".replaceAll("/", "%2F")); + md.put(CallLabels.LEGACY_RESOURCE_PREFIX, "projects/p/instances/i/tables/t"); + md.put(CallLabels.ROUTING_COOKIE, "some-opaque-string"); + md.put(CallLabels.FEATURE_FLAGS, "some-serialized-features-string"); + md.put(CallLabels.API_CLIENT, "some-client"); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), 
md); + assertThat(callLabels.getRequestParams()) + .isEqualTo( + Optional.of("table_name=projects%2Fp%2Finstances%2Fi%2Ftables%2Ft&app_profile_id=a")); + assertThat(callLabels.getLegacyResourcePrefix()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); + assertThat(callLabels.getRoutingCookie()).isEqualTo(Optional.of("some-opaque-string")); + assertThat(callLabels.getEncodedFeatures()) + .isEqualTo(Optional.of("some-serialized-features-string")); assertThat(callLabels.getApiClient()).isEqualTo(Optional.of("some-client")); - assertThat(callLabels.getAppProfileId()).isEqualTo(Optional.of("a")); - assertThat(callLabels.getResourceName()) + + assertThat(callLabels.extractAppProfileId()).isEqualTo(Optional.of("a")); + assertThat(callLabels.extractResourceName()) .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); } @Test - public void testResourceEscaped() { + public void testResourceEscaped() throws ParsingException { Metadata md = new Metadata(); - md.put(REQUEST_PARAMS, "table_name=projects/p/instances/i/tables/t".replace("/", "%2F")); + md.put( + CallLabels.REQUEST_PARAMS, + "table_name=projects/p/instances/i/tables/t".replace("/", "%2F")); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - assertThat(callLabels.getResourceName()) + assertThat(callLabels.extractResourceName()) .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); } @Test - public void testEmpty() { + public void testEmpty() throws ParsingException { Metadata md = new Metadata(); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractAppProfileId()).isEqualTo(Optional.empty()); } @Test - public void testMalformed1() { + public void testLegacyFallback() throws ParsingException { Metadata md = new Metadata(); - md.put(REQUEST_PARAMS, 
"table_name="); + md.put(CallLabels.LEGACY_RESOURCE_PREFIX, "projects/p/instances/i/tables/t"); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractResourceName()) + .isEqualTo(Optional.of("projects/p/instances/i/tables/t")); } @Test - public void testMalformed2() { + public void testMalformed1() throws ParsingException { Metadata md = new Metadata(); - md.put(REQUEST_PARAMS, "&"); + md.put(CallLabels.REQUEST_PARAMS, "table_name="); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); } @Test - public void testMalformed3() { + public void testMalformed2() throws ParsingException { Metadata md = new Metadata(); - md.put(REQUEST_PARAMS, "table_name=&"); + md.put(CallLabels.REQUEST_PARAMS, "&"); CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - assertThat(callLabels.getResourceName()).isEqualTo(Optional.empty()); + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); } - private static class CallLabelsSubject extends Subject { - private final CallLabels actual; - - public CallLabelsSubject(FailureMetadata metadata, @Nullable CallLabels actual) { - super(metadata, actual); - this.actual = actual; - } + @Test + public void testMalformed3() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "table_name=&"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - public static Factory callLabels() { - return CallLabelsSubject::new; - } + assertThat(callLabels.extractResourceName()).isEqualTo(Optional.empty()); + } - public static CallLabelsSubject assertThat(CallLabels callLabels) { - return assertAbout(callLabels()).that(callLabels); - } + 
@Test + public void testMalformed4() throws ParsingException { + Metadata md = new Metadata(); + md.put(CallLabels.REQUEST_PARAMS, "table_name=%s"); + CallLabels callLabels = CallLabels.create(BigtableGrpc.getMutateRowMethod(), md); - public void hasMethodName(String method) { - check("getMethodName()").that(actual.getMethodName()).isEqualTo(method); - } + assertThrows(ParsingException.class, callLabels::extractResourceName); + } - public void hasResourceName(String resourceName) { - check("hasResourceName()") - .that(actual.getResourceName()) - .isEqualTo(Optional.of(resourceName)); - } + @Test + public void testPrimingKey() throws ParsingException { + final String tableName = "projects/myp/instances/myi/tables/myt"; + final String encodedTableName = "projects%2Fmyp%2Finstances%2Fmyi%2Ftables%2Fmyt"; + final String instanceName = "projects/myp/instances/myi"; + final String encodedInstanceName = "projects%2Fmyp%2Finstances%2Fmyi"; + final String appProfileId = "mya"; + + CallLabels callLabels = + CallLabels.create( + BigtableGrpc.getMutateRowMethod(), + Optional.of( + String.format("table_name=%s&app_profile_id=%s", encodedTableName, appProfileId)), + Optional.of(tableName), + Optional.of("opaque-cookie"), + Optional.of("encoded-features"), + Optional.of("some-client")); + PrimingKey key = PrimingKey.from(callLabels).get(); + + assertThat(key.getAppProfileId()).isEqualTo(Optional.of("mya")); + assertThat(key.getName()).isEqualTo(instanceName); + + Metadata m = new Metadata(); + + m.put( + CallLabels.REQUEST_PARAMS, + String.format("name=%s&app_profile_id=%s", encodedInstanceName, appProfileId)); + m.put(CallLabels.LEGACY_RESOURCE_PREFIX, instanceName); + m.put(CallLabels.ROUTING_COOKIE, "opaque-cookie"); + m.put(CallLabels.FEATURE_FLAGS, "encoded-features"); + m.put(CallLabels.API_CLIENT, "some-client"); + + assertThat(key.composeMetadata().toString()).isEqualTo(m.toString()); + + assertThat(key.composeProto()) + .isEqualTo( + PingAndWarmRequest.newBuilder() + 
.setName(instanceName) + .setAppProfileId(appProfileId) + .build()); } } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java index 59f66d44148..7fd741a5445 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImplTest.java @@ -53,9 +53,14 @@ public void testBasic() { CallLabels callLabels = CallLabels.create( BigtableGrpc.getMutateRowMethod(), - Optional.of("some-client"), + Optional.of( + "table_name=projects/p/instances/i/tables/t&app_profile_id=a" + .replaceAll("/", "%2F")), Optional.of("projects/p/instances/i/tables/t"), - Optional.of("a")); + Optional.of("opaque-cookie"), + Optional.of("encoded-features"), + Optional.of("some-client")); + Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); assertThat(attrs.asMap()) .containsAtLeast( @@ -72,6 +77,8 @@ public void testMissing() { BigtableGrpc.getMutateRowMethod(), Optional.empty(), Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.empty()); Attributes attrs = metrics.createAttributes(callLabels).getAttributes(); assertThat(attrs.asMap()) From 529a88691c181045494033a042a5eb5dcf7c82ab Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:23:15 -0600 Subject: [PATCH 42/66] chore(functions): delete sample functions_ci_cd_cloud_build (#9795) --- functions/README.md | 1 - functions/ci_cd/cloudbuild.yaml | 23 ----------------------- 2 files changed, 24 deletions(-) delete mode 100644 functions/ci_cd/cloudbuild.yaml diff --git a/functions/README.md b/functions/README.md index 5d5d6086a7b..8258e9db6e5 100644 --- a/functions/README.md +++ b/functions/README.md @@ -21,7 +21,6 @@ There 
are two versions of Cloud Run functions: * [Slack](slack/) * [OCR tutorial](v2/ocr/) * [ImageMagick](v2/imagemagick/) -* [CI/CD setup](ci_cd/) ## Running Functions Locally The [Java Functions Framework](https://github.com/GoogleCloudPlatform/functions-framework-java) diff --git a/functions/ci_cd/cloudbuild.yaml b/functions/ci_cd/cloudbuild.yaml deleted file mode 100644 index 38b402a8b4d..00000000000 --- a/functions/ci_cd/cloudbuild.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# [START functions_ci_cd_cloud_build] -steps: -- name: 'gcr.io/cloud-builders/mvn' - args: ['clean', 'verify'] - dir: 'function/dir/from/repo/root' -- name: 'gcr.io/cloud-builders/gcloud' - args: ['functions', 'deploy', '[YOUR_DEPLOYED_FUNCTION_NAME]', '[YOUR_FUNCTION_TRIGGER]', '--runtime', 'java11', '--entry-point', '[YOUR_FUNCTION_NAME_IN_CODE]'] - dir: 'function/dir/from/repo/root' -# [END functions_ci_cd_cloud_build] From 06f5d07c5a93c8b3b0e5a83ecc21b5a8ec80a862 Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:25:34 -0600 Subject: [PATCH 43/66] chore(tts): delete sample tts_synthesize_text_file (#9745) --- .../example/texttospeech/SynthesizeFile.java | 155 ------------------ .../texttospeech/SynthesizeFileIT.java | 78 --------- 2 files changed, 233 deletions(-) delete mode 100644 texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java delete mode 100644 texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java diff --git a/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java b/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java deleted file mode 100644 index bea8c47748d..00000000000 --- a/texttospeech/beta/src/main/java/com/example/texttospeech/SynthesizeFile.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.texttospeech; - -// Imports the Google Cloud client library -import com.google.cloud.texttospeech.v1beta1.AudioConfig; -import com.google.cloud.texttospeech.v1beta1.AudioEncoding; -import com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender; -import com.google.cloud.texttospeech.v1beta1.SynthesisInput; -import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse; -import com.google.cloud.texttospeech.v1beta1.TextToSpeechClient; -import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams; -import com.google.protobuf.ByteString; -import java.io.FileOutputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Paths; -import net.sourceforge.argparse4j.ArgumentParsers; -import net.sourceforge.argparse4j.inf.ArgumentParser; -import net.sourceforge.argparse4j.inf.ArgumentParserException; -import net.sourceforge.argparse4j.inf.MutuallyExclusiveGroup; -import net.sourceforge.argparse4j.inf.Namespace; - -/** - * Google Cloud TextToSpeech API sample application. Example usage: mvn package exec:java - * -Dexec.mainClass='com.example.texttospeech.SynthesizeFile' -Dexec.args='--text - * resources/hello.txt' - */ -public class SynthesizeFile { - - // [START tts_synthesize_text_file] - /** - * Demonstrates using the Text to Speech client to synthesize a text file or ssml file. - * - * @param textFile the text file to be synthesized. (e.g., hello.txt) - * @throws Exception on TextToSpeechClient Errors. 
- */ - public static void synthesizeTextFile(String textFile) throws Exception { - // Instantiates a client - try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) { - // Read the file's contents - String contents = new String(Files.readAllBytes(Paths.get(textFile))); - // Set the text input to be synthesized - SynthesisInput input = SynthesisInput.newBuilder().setText(contents).build(); - - // Build the voice request - VoiceSelectionParams voice = - VoiceSelectionParams.newBuilder() - .setLanguageCode("en-US") // languageCode = "en_us" - .setSsmlGender(SsmlVoiceGender.FEMALE) // ssmlVoiceGender = SsmlVoiceGender.FEMALE - .build(); - - // Select the type of audio file you want returned - AudioConfig audioConfig = - AudioConfig.newBuilder() - .setAudioEncoding(AudioEncoding.MP3) // MP3 audio. - .build(); - - // Perform the text-to-speech request - SynthesizeSpeechResponse response = - textToSpeechClient.synthesizeSpeech(input, voice, audioConfig); - - // Get the audio contents from the response - ByteString audioContents = response.getAudioContent(); - - // Write the response to the output file. - try (OutputStream out = new FileOutputStream("output.mp3")) { - out.write(audioContents.toByteArray()); - System.out.println("Audio content written to file \"output.mp3\""); - } - } - } - // [END tts_synthesize_text_file] - - // [START tts_synthesize_ssml_file] - /** - * Demonstrates using the Text to Speech client to synthesize a text file or ssml file. - * - * @param ssmlFile the ssml document to be synthesized. (e.g., hello.ssml) - * @throws Exception on TextToSpeechClient Errors. 
- */ - public static void synthesizeSsmlFile(String ssmlFile) throws Exception { - // Instantiates a client - try (TextToSpeechClient textToSpeechClient = TextToSpeechClient.create()) { - // Read the file's contents - String contents = new String(Files.readAllBytes(Paths.get(ssmlFile))); - // Set the ssml input to be synthesized - SynthesisInput input = SynthesisInput.newBuilder().setSsml(contents).build(); - - // Build the voice request - VoiceSelectionParams voice = - VoiceSelectionParams.newBuilder() - .setLanguageCode("en-US") // languageCode = "en_us" - .setSsmlGender(SsmlVoiceGender.FEMALE) // ssmlVoiceGender = SsmlVoiceGender.FEMALE - .build(); - - // Select the type of audio file you want returned - AudioConfig audioConfig = - AudioConfig.newBuilder() - .setAudioEncoding(AudioEncoding.MP3) // MP3 audio. - .build(); - - // Perform the text-to-speech request - SynthesizeSpeechResponse response = - textToSpeechClient.synthesizeSpeech(input, voice, audioConfig); - - // Get the audio contents from the response - ByteString audioContents = response.getAudioContent(); - - // Write the response to the output file. - try (OutputStream out = new FileOutputStream("output.mp3")) { - out.write(audioContents.toByteArray()); - System.out.println("Audio content written to file \"output.mp3\""); - } - } - } - // [END tts_synthesize_ssml_file] - - public static void main(String... 
args) throws Exception { - ArgumentParser parser = - ArgumentParsers.newFor("SynthesizeFile") - .build() - .defaultHelp(true) - .description("Synthesize a text file or ssml file."); - MutuallyExclusiveGroup group = parser.addMutuallyExclusiveGroup().required(true); - group.addArgument("--text").help("The text file from which to synthesize speech."); - group.addArgument("--ssml").help("The ssml file from which to synthesize speech."); - - try { - Namespace namespace = parser.parseArgs(args); - - if (namespace.get("text") != null) { - synthesizeTextFile(namespace.getString("text")); - } else { - synthesizeSsmlFile(namespace.getString("ssml")); - } - } catch (ArgumentParserException e) { - parser.handleError(e); - } - } -} diff --git a/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java b/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java deleted file mode 100644 index eed608b1c18..00000000000 --- a/texttospeech/beta/src/test/java/com/example/texttospeech/SynthesizeFileIT.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.texttospeech; - -import static com.google.common.truth.Truth.assertThat; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.PrintStream; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for SynthesizeFile sample. */ -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class SynthesizeFileIT { - - private static String OUTPUT = "output.mp3"; - private static String TEXT_FILE = "resources/hello.txt"; - private static String SSML_FILE = "resources/hello.ssml"; - - private ByteArrayOutputStream bout; - private PrintStream out; - private File outputFile; - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - System.setOut(out); - } - - @After - public void tearDown() { - outputFile.delete(); - } - - @Test - public void testSynthesizeText() throws Exception { - // Act - SynthesizeFile.synthesizeTextFile(TEXT_FILE); - - // Assert - outputFile = new File(OUTPUT); - assertThat(outputFile.isFile()).isTrue(); - String got = bout.toString(); - assertThat(got).contains("Audio content written to file \"output.mp3\""); - } - - @Test - public void testSynthesizeSsml() throws Exception { - // Act - SynthesizeFile.synthesizeSsmlFile(SSML_FILE); - - // Assert - outputFile = new File(OUTPUT); - assertThat(outputFile.isFile()).isTrue(); - String got = bout.toString(); - assertThat(got).contains("Audio content written to file \"output.mp3\""); - } -} From 3fcc6698d8b10a08630a412f7b52f2f655ea41a8 Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:26:33 -0600 Subject: [PATCH 44/66] chore(documentai): delete sample documentai_process_quality_document (#9793) --- .../v1beta3/ProcessQualityDocument.java | 105 ------------------ .../v1beta3/ProcessQualityDocumentTest.java | 77 
------------- 2 files changed, 182 deletions(-) delete mode 100644 document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java delete mode 100644 document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java diff --git a/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java b/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java deleted file mode 100644 index c212777f874..00000000000 --- a/document-ai/src/main/java/documentai/v1beta3/ProcessQualityDocument.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package documentai.v1beta3; - -// [START documentai_process_quality_document] - -import com.google.cloud.documentai.v1beta3.Document; -import com.google.cloud.documentai.v1beta3.DocumentProcessorServiceClient; -import com.google.cloud.documentai.v1beta3.DocumentProcessorServiceSettings; -import com.google.cloud.documentai.v1beta3.ProcessRequest; -import com.google.cloud.documentai.v1beta3.ProcessResponse; -import com.google.cloud.documentai.v1beta3.RawDocument; -import com.google.protobuf.ByteString; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; - -public class ProcessQualityDocument { - public static void processQualityDocument() - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // TODO(developer): Replace these variables before running the sample. - String projectId = "your-project-id"; - String location = "your-project-location"; // Format is "us" or "eu". - String processerId = "your-processor-id"; - String filePath = "path/to/input/file.pdf"; - processQualityDocument(projectId, location, processerId, filePath); - } - - public static void processQualityDocument( - String projectId, String location, String processorId, String filePath) - throws IOException, InterruptedException, ExecutionException, TimeoutException { - // Initialize client that will be used to send requests. This client only needs - // to be created - // once, and can be reused for multiple requests. After completing all of your - // requests, call - // the "close" method on the client to safely clean up any remaining background - // resources. 
- String endpoint = String.format("%s-documentai.googleapis.com:443", location); - DocumentProcessorServiceSettings settings = - DocumentProcessorServiceSettings.newBuilder().setEndpoint(endpoint).build(); - try (DocumentProcessorServiceClient client = DocumentProcessorServiceClient.create(settings)) { - // The full resource name of the processor, e.g.: - // projects/project-id/locations/location/processor/processor-id - // You must create new processors in the Cloud Console first - String name = - String.format("projects/%s/locations/%s/processors/%s", projectId, location, processorId); - - // Read the file. - byte[] imageFileData = Files.readAllBytes(Paths.get(filePath)); - - // Convert the image data to a Buffer and base64 encode it. - ByteString content = ByteString.copyFrom(imageFileData); - - RawDocument document = - RawDocument.newBuilder().setContent(content).setMimeType("application/pdf").build(); - - // Configure the process request. - ProcessRequest request = - ProcessRequest.newBuilder().setName(name).setRawDocument(document).build(); - - // Recognizes text entities in the PDF document - ProcessResponse result = client.processDocument(request); - Document documentResponse = result.getDocument(); - - System.out.println("Document processing complete."); - - // Read the quality-specific information from the output from the - // Intelligent Document Quality Processor: - // https://cloud.google.com/document-ai/docs/processors-list#processor_doc-quality-processor - // OCR and other data is also present in the quality processor's response. - // Please see the OCR and other samples for how to parse other data in the - // response. 
- List entities = documentResponse.getEntitiesList(); - for (Document.Entity entity : entities) { - float entityConfidence = entity.getConfidence(); - long pageNumber = entity.getPageAnchor().getPageRefs(0).getPage() + 1; - System.out.printf( - "Page %d has a quality score of (%.2f%%):\n", pageNumber, entityConfidence * 100.0); - for (Document.Entity property : entity.getPropertiesList()) { - float propertyConfidence = property.getConfidence(); - String propertyType = property.getType(); - System.out.printf(" * %s score of %.2f%%\n", propertyType, propertyConfidence * 100.0); - } - } - } - } -} -// [END documentai_process_quality_document] diff --git a/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java b/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java deleted file mode 100644 index 7379dbf0f30..00000000000 --- a/document-ai/src/test/java/documentai/v1beta3/ProcessQualityDocumentTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package documentai.v1beta3; - -import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertNotNull; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class ProcessQualityDocumentTest { - private static final String PROJECT_ID = System.getenv("GOOGLE_CLOUD_PROJECT"); - private static final String PROCESSOR_ID = "f80f55e03d4c20ed"; - private static final String FILE_PATH = "resources/document_quality_poor.pdf"; - - private ByteArrayOutputStream bout; - private PrintStream out; - private PrintStream originalPrintStream; - - private static void requireEnvVar(String varName) { - assertNotNull( - String.format("Environment variable '%s' must be set to perform these tests.", varName), - System.getenv(varName)); - } - - @Before - public void checkRequirements() { - requireEnvVar("GOOGLE_CLOUD_PROJECT"); - requireEnvVar("GOOGLE_APPLICATION_CREDENTIALS"); - } - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - out = new PrintStream(bout); - originalPrintStream = System.out; - System.setOut(out); - } - - @Test - public void testProcessQualityDocument() - throws InterruptedException, ExecutionException, IOException, TimeoutException { - // parse the GCS invoice as a form. 
- ProcessQualityDocument.processQualityDocument(PROJECT_ID, "us", PROCESSOR_ID, FILE_PATH); - String got = bout.toString(); - - assertThat(got).contains("Page 1 has a quality score of"); - assertThat(got).contains("defect_blurry score of 9"); - assertThat(got).contains("defect_noisy"); - } - - @After - public void tearDown() { - System.out.flush(); - System.setOut(originalPrintStream); - } -} From fac7da37ef1125e5cc1fd74e4ad7825027c1e93b Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 10:39:29 -0500 Subject: [PATCH 45/66] chore: misc non-functional cleanup (#9750) --- .../proxy/channelpool/ChannelPool.java | 2 +- .../proxy/channelpool/DataChannel.java | 6 + .../proxy/channelpool/ResourceCollector.java | 2 +- .../examples/proxy/commands/Verify.java | 65 +++----- .../examples/proxy/core/CallLabels.java | 31 +++- .../metrics/InstrumentedCallCredentials.java | 36 ++++- .../examples/proxy/metrics/Metrics.java | 1 + .../examples/proxy/metrics/MetricsImpl.java | 142 ++++++++++++------ .../examples/proxy/metrics/Tracer.java | 6 + .../src/main/resources/logback.xml | 7 + .../examples/proxy/metrics/NoopMetrics.java | 0 11 files changed, 195 insertions(+), 103 deletions(-) rename bigtable/bigtable-proxy/src/{main => test}/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java (100%) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java index 188dd1bf131..380d97c9418 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ChannelPool.java @@ -47,7 +47,7 @@ * A {@link ManagedChannel} that will send requests round-robin via a set of channels. * *

    In addition to spreading requests over a set of child connections, the pool will also actively - * manage the lifecycle of the channels. Currently lifecycle management is limited to pre-emptively + * manage the lifecycle of the channels. Currently, lifecycle management is limited to pre-emptively * replacing channels every hour. In the future it will dynamically size the pool based on number of * outstanding requests. * diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index 7eba4e7bb4a..c61db5ea3f8 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -49,6 +49,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * Decorator for a Bigtable data plane connection to add channel warming via PingAndWarm. Channel + * warming will happen on creation and then every 3 minutes. 
+ */ public class DataChannel extends ManagedChannel { private static final Logger LOGGER = LoggerFactory.getLogger(DataChannel.class); @@ -111,6 +115,8 @@ private void warm() { return; } + LOGGER.debug("Warming channel {} with: {}", inner, primingKeys); + List> futures = primingKeys.stream().map(this::sendPingAndWarm).collect(Collectors.toList()); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java index b9c492054f8..d36fb630ef3 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/ResourceCollector.java @@ -37,7 +37,7 @@ public void collect(CallLabels labels) { try { PrimingKey.from(labels).ifPresent(k -> primingKeys.put(k, true)); } catch (ParsingException e) { - LOG.atWarn().log("Failed to collect priming request for {}", labels, e); + LOG.warn("Failed to collect priming request for {}", labels, e); } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java index aee401c63ed..669385e4421 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Verify.java @@ -33,7 +33,6 @@ import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; import com.google.common.collect.ImmutableList; -import com.google.common.net.PercentEscaper; import com.google.protobuf.ByteString; import io.grpc.CallCredentials; import io.grpc.CallOptions; @@ 
-49,18 +48,13 @@ import io.grpc.MethodDescriptor; import io.grpc.StatusRuntimeException; import io.grpc.auth.MoreCallCredentials; -import io.opentelemetry.api.common.Attributes; import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; import io.opentelemetry.sdk.common.CompletableResultCode; import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.export.MetricExporter; -import io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; -import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; -import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; import io.opentelemetry.sdk.resources.Resource; -import java.time.Duration; -import java.time.Instant; -import java.time.temporal.ChronoUnit; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; @@ -123,10 +117,12 @@ private void checkBigtable(CallCredentials callCredentials, String tableName) { try { Metadata md = new Metadata(); - PercentEscaper escaper = new PercentEscaper("", true); + md.put( Key.of("x-goog-request-params", Metadata.ASCII_STRING_MARSHALLER), - String.format("table_name=%s&app_profile_id=%s", escaper.escape(tableName), "")); + String.format( + "table_name=%s&app_profile_id=%s", + URLEncoder.encode(tableName, StandardCharsets.UTF_8), "")); BigtableBlockingStub stub = BigtableGrpc.newBlockingStub(channel) @@ -186,43 +182,28 @@ private void checkBigtable(CallCredentials callCredentials, String tableName) { } void checkMetrics(Credentials creds) { - Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); - Instant start = end.minus(Duration.ofMinutes(1)); + MetricConfiguration config = + MetricConfiguration.builder() + .setCredentials(creds) + .setProjectId(metricsProjectId) + .setInstrumentationLibraryLabelsEnabled(false) + .build(); GCPResourceProvider resourceProvider = new GCPResourceProvider(); 
Resource resource = Resource.create(resourceProvider.getAttributes()); + ImmutableList metricData = + ImmutableList.of(MetricsImpl.generateTestPresenceMeasurement(resource)); - MetricExporter exporter = - GoogleCloudMetricExporter.createWithConfiguration( - MetricConfiguration.builder() - .setCredentials(creds) - .setProjectId(metricsProjectId) - .setInstrumentationLibraryLabelsEnabled(false) - .build()); + try (MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config)) { + CompletableResultCode result = exporter.export(metricData); + result.join(1, TimeUnit.MINUTES); - ImmutableList metricData = - ImmutableList.of( - ImmutableMetricData.createLongGauge( - resource, - MetricsImpl.INSTRUMENTATION_SCOPE_INFO, - MetricsImpl.METRIC_PRESENCE_NAME, - MetricsImpl.METRIC_PRESENCE_DESC, - MetricsImpl.METRIC_PRESENCE_UNIT, - ImmutableGaugeData.create( - ImmutableList.of( - ImmutableLongPointData.create( - TimeUnit.MILLISECONDS.toNanos(start.toEpochMilli()), - TimeUnit.MILLISECONDS.toNanos(end.toEpochMilli()), - Attributes.empty(), - 1L))))); - CompletableResultCode result = exporter.export(metricData); - result.join(1, TimeUnit.MINUTES); - - System.out.println("Metrics resource: " + resource); - if (result.isSuccess()) { - System.out.println("Metrics write: OK"); - } else { - System.out.println("Metrics write: FAILED: " + result.getFailureThrowable().getMessage()); + System.out.println("Metrics resource: " + resource); + if (result.isSuccess()) { + System.out.println("Metrics write: OK"); + } else { + System.out.println("Metrics write: FAILED: " + result.getFailureThrowable().getMessage()); + } } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java index c15448ad717..cdd3c6f5e38 100644 --- 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallLabels.java @@ -41,6 +41,10 @@ * *

      *
    • {@code x-goog-request-params} - contains the resource and app profile id + *
    • {@code google-cloud-resource-prefix} - the previous version of {@code + * x-goog-request-params}, used as a fallback + *
    • {@code x-goog-cbt-cookie-routing} - an opaque blob used to routing RPCs on the serverside + *
    • {@code bigtable-features} - the client's available features *
    • {@code x-goog-api-client} - contains the client info of the downstream client *
    */ @@ -111,6 +115,7 @@ public static CallLabels create(MethodDescriptor method, Metadata headers) method, requestParams, legacyResourcePrefix, routingCookie, encodedFeatures, apiClient); } + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") @VisibleForTesting public static CallLabels create( MethodDescriptor method, @@ -129,6 +134,12 @@ public static CallLabels create( apiClient); } + /** + * Extracts the resource name, will use {@link #getRequestParams()} if present, otherwise falls + * back on {@link #getLegacyResourcePrefix()}. If neither is present, {@link Optional#empty()} is + * returned. If there was an issue extracting, a {@link ParsingException} is thrown. In the + * primary case, the value will be url decoded. + */ public Optional extractResourceName() throws ParsingException { if (getRequestParams().isEmpty()) { return getLegacyResourcePrefix(); @@ -167,17 +178,19 @@ public Optional extractResourceName() throws ParsingException { return resourceName.map(ResourceName::getValue); } - private static Optional findType(String encodedKey) throws ParsingException { - String decodedKey = percentDecode(encodedKey); - + private static Optional findType(String key) { for (ResourceNameType type : ResourceNameType.values()) { - if (type.name.equals(decodedKey)) { + if (type.name.equals(key)) { return Optional.of(type); } } return Optional.empty(); } + /** + * Extracts the app profile id from {@link #getRequestParams()}. Returns {@link Optional#empty()} + * if the key is missing. The value will be url decoded. + */ public Optional extractAppProfileId() throws ParsingException { String requestParams = getRequestParams().orElse(""); @@ -200,13 +213,17 @@ private static String percentDecode(String s) throws ParsingException { } } + /** + * Can be derived from {@link CallLabels} to create a priming request to keep the channel active + * for future RPCs. 
+ */ @AutoValue public abstract static class PrimingKey { - abstract Map getMetadata(); + protected abstract Map getMetadata(); - abstract String getName(); + protected abstract String getName(); - abstract Optional getAppProfileId(); + protected abstract Optional getAppProfileId(); public static Optional from(CallLabels labels) throws ParsingException { final ImmutableMap.Builder md = ImmutableMap.builder(); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java index 9f70124f600..14d1454a22f 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/InstrumentedCallCredentials.java @@ -16,18 +16,40 @@ package com.google.cloud.bigtable.examples.proxy.metrics; +import com.google.cloud.bigtable.examples.proxy.channelpool.DataChannel; +import com.google.cloud.bigtable.examples.proxy.core.CallLabels.PrimingKey; +import com.google.cloud.bigtable.examples.proxy.core.ProxyHandler; import com.google.common.base.Stopwatch; import io.grpc.CallCredentials; +import io.grpc.CallOptions; import io.grpc.InternalMayRequireSpecificExecutor; import io.grpc.Metadata; +import io.grpc.ServerCall; import io.grpc.Status; import java.time.Duration; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import javax.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +/** + * {@link CallCredentials} decorator that tracks latency for fetching credentials. + * + *

    This expects that all RPCs that use these credentials embed a {@link Tracer} in the {@link + * io.grpc.CallOptions} using {@link Tracer#injectIntoCallOptions(CallOptions)}. + * + *

    Known callers: + * + *

      + *
    • {@link DataChannel#sendPingAndWarm(PrimingKey)} + *
    • {@link ProxyHandler#startCall(ServerCall, Metadata)} + *
    + */ public class InstrumentedCallCredentials extends CallCredentials implements InternalMayRequireSpecificExecutor { + private static final Logger LOG = LoggerFactory.getLogger(InstrumentedCallCredentials.class); + private final CallCredentials inner; private final boolean specificExecutorRequired; @@ -56,15 +78,21 @@ public void applyRequestMetadata( new MetadataApplier() { @Override public void apply(Metadata headers) { - tracer.onCredentialsFetch( - Status.OK, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + Duration latency = Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + // Most credentials fetches should very fast because they are cached + if (latency.compareTo(Duration.ofMillis(1)) >= 1) { + LOG.debug("Fetching Credentials took {}", latency); + } + tracer.onCredentialsFetch(Status.OK, latency); applier.apply(headers); } @Override public void fail(Status status) { - tracer.onCredentialsFetch( - status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + Duration latency = Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + + LOG.warn("Failed to fetch Credentials after {}: {}", latency, status); + tracer.onCredentialsFetch(status, latency); applier.fail(status); } }); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index 3c8d5e9cb24..d6571989962 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -21,6 +21,7 @@ import io.grpc.Status; import java.time.Duration; +/** Interface for tracking measurements across the application. 
*/ public interface Metrics { MetricsAttributes createAttributes(CallLabels callLabels); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 27ae626f271..a840a8f506d 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -22,7 +22,10 @@ import com.google.cloud.bigtable.examples.proxy.core.CallLabels.ParsingException; import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; import io.grpc.Status; +import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.common.AttributesBuilder; @@ -32,37 +35,50 @@ import io.opentelemetry.api.metrics.LongUpDownCounter; import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.api.metrics.ObservableLongGauge; import io.opentelemetry.contrib.gcp.resource.GCPResourceProvider; import io.opentelemetry.sdk.common.InstrumentationScopeInfo; import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableGaugeData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; import io.opentelemetry.sdk.resources.Resource; import 
java.io.Closeable; import java.io.IOException; import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * Central definition of all the {@link OpenTelemetry} metrics in this application. + * + *

    The metric definition themselves are only accessible via typesafe record methods. + */ +@SuppressWarnings("ClassEscapesDefinedScope") public class MetricsImpl implements Closeable, Metrics { private static final Logger LOG = LoggerFactory.getLogger(MetricsImpl.class); - public static final InstrumentationScopeInfo INSTRUMENTATION_SCOPE_INFO = - InstrumentationScopeInfo.create("bigtable-proxy"); + private static final InstrumentationScopeInfo INSTRUMENTATION_SCOPE_INFO = + InstrumentationScopeInfo.builder("bigtable-proxy").setVersion("0.0.1").build(); - public static final String METRIC_PREFIX = "bigtableproxy."; + private static final String METRIC_PREFIX = "bigtableproxy."; - public static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("api_client"); - public static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); - public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); - public static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); - public static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + private static final AttributeKey API_CLIENT_KEY = AttributeKey.stringKey("api_client"); + private static final AttributeKey RESOURCE_KEY = AttributeKey.stringKey("resource"); + private static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + private static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + private static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); - public static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence"; - public static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; - public static final String METRIC_PRESENCE_UNIT = "{process}"; + private static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence"; + private static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; + private static final String METRIC_PRESENCE_UNIT = 
"{process}"; private final MeterProvider meterProvider; @@ -75,19 +91,37 @@ public class MetricsImpl implements Closeable, Metrics { private final LongHistogram requestSizes; private final LongHistogram responseSizes; + private final ObservableLongGauge outstandingRpcCountGauge; + private final ObservableLongGauge presenceGauge; + private final LongUpDownCounter channelCounter; private final AtomicInteger numOutstandingRpcs = new AtomicInteger(); private final AtomicInteger maxSeen = new AtomicInteger(); - static Supplier gcpResourceSupplier = - () -> Resource.create(new GCPResourceProvider().getAttributes()); - public MetricsImpl(Credentials credentials, String projectId) throws IOException { this(createMeterProvider(credentials, projectId)); } + private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) { + MetricConfiguration config = + MetricConfiguration.builder() + .setProjectId(projectId) + .setCredentials(credentials) + .setInstrumentationLibraryLabelsEnabled(false) + .build(); + + MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config); + + return SdkMeterProvider.builder() + .setResource(Resource.create(new GCPResourceProvider().getAttributes())) + .registerMetricReader( + PeriodicMetricReader.builder(exporter).setInterval(Duration.ofMinutes(1)).build()) + .build(); + } + MetricsImpl(MeterProvider meterProvider) { this.meterProvider = meterProvider; + @SuppressWarnings("DataFlowIssue") Meter meter = meterProvider .meterBuilder(INSTRUMENTATION_SCOPE_INFO.getName()) @@ -168,23 +202,28 @@ public MetricsImpl(Credentials credentials, String projectId) throws IOException .setUnit("{channel}") .build(); - meter - .gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") - .setDescription("Maximum number of concurrent RPCs in a single minute window") - .setUnit("{call}") - .ofLongs() - .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); - - meter - .gaugeBuilder(METRIC_PRESENCE_NAME) - 
.setDescription(METRIC_PRESENCE_DESC) - .setUnit(METRIC_PRESENCE_UNIT) - .ofLongs() - .buildWithCallback(o -> o.record(1)); + outstandingRpcCountGauge = + meter + .gaugeBuilder(METRIC_PREFIX + "client.call.max_outstanding_count") + .setDescription("Maximum number of concurrent RPCs in a single minute window") + .setUnit("{call}") + .ofLongs() + .buildWithCallback(o -> o.record(maxSeen.getAndSet(0))); + + presenceGauge = + meter + .gaugeBuilder(METRIC_PRESENCE_NAME) + .setDescription(METRIC_PRESENCE_DESC) + .setUnit(METRIC_PRESENCE_UNIT) + .ofLongs() + .buildWithCallback(o -> o.record(1)); } @Override public void close() throws IOException { + outstandingRpcCountGauge.close(); + presenceGauge.close(); + if (meterProvider instanceof Closeable) { ((Closeable) meterProvider).close(); } @@ -201,7 +240,7 @@ public MetricsAttributesImpl createAttributes(CallLabels callLabels) { try { resourceValue = callLabels.extractResourceName().orElse(""); } catch (ParsingException e) { - LOG.atWarn().log("Failed to extract resource from callLabels: {}", callLabels, e); + LOG.warn("Failed to extract resource from callLabels: {}", callLabels, e); resourceValue = ""; } attrs.put(MetricsImpl.RESOURCE_KEY, resourceValue); @@ -210,7 +249,7 @@ public MetricsAttributesImpl createAttributes(CallLabels callLabels) { try { appProfile = callLabels.extractAppProfileId().orElse(""); } catch (ParsingException e) { - LOG.atWarn().log("Failed to extract app profile from callLabels: {}", callLabels, e); + LOG.warn("Failed to extract app profile from callLabels: {}", callLabels, e); appProfile = ""; } attrs.put(MetricsImpl.APP_PROFILE_KEY, appProfile); @@ -218,23 +257,6 @@ public MetricsAttributesImpl createAttributes(CallLabels callLabels) { return new AutoValue_MetricsImpl_MetricsAttributesImpl(attrs.build()); } - private static SdkMeterProvider createMeterProvider(Credentials credentials, String projectId) { - MetricConfiguration config = - MetricConfiguration.builder() - .setProjectId(projectId) - 
.setCredentials(credentials) - .setInstrumentationLibraryLabelsEnabled(false) - .build(); - - MetricExporter exporter = GoogleCloudMetricExporter.createWithConfiguration(config); - - return SdkMeterProvider.builder() - .setResource(gcpResourceSupplier.get()) - .registerMetricReader( - PeriodicMetricReader.builder(exporter).setInterval(Duration.ofMinutes(1)).build()) - .build(); - } - @Override public void recordCallStarted(MetricsAttributes attrs) { serverCallsStarted.add(1, unwrap(attrs)); @@ -289,10 +311,34 @@ public void updateChannelCount(int delta) { channelCounter.add(delta); } - static Attributes unwrap(MetricsAttributes wrapped) { + private static Attributes unwrap(MetricsAttributes wrapped) { return ((MetricsAttributesImpl) wrapped).getAttributes(); } + /** + * Generate a test data point to test permissions for exporting metrics. Used in {@link + * com.google.cloud.bigtable.examples.proxy.commands.Verify}. + */ + public static MetricData generateTestPresenceMeasurement(Resource resource) { + Instant end = Instant.now().truncatedTo(ChronoUnit.MINUTES); + Instant start = end.minus(Duration.ofMinutes(1)); + + return ImmutableMetricData.createLongGauge( + resource, + INSTRUMENTATION_SCOPE_INFO, + METRIC_PRESENCE_NAME, + METRIC_PRESENCE_DESC, + METRIC_PRESENCE_UNIT, + ImmutableGaugeData.create( + ImmutableList.of( + ImmutableLongPointData.create( + TimeUnit.MILLISECONDS.toNanos(start.toEpochMilli()), + TimeUnit.MILLISECONDS.toNanos(end.toEpochMilli()), + Attributes.empty(), + 1L)))); + } + + @VisibleForTesting @AutoValue abstract static class MetricsAttributesImpl implements MetricsAttributes { abstract Attributes getAttributes(); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index a14ba9ece5e..3dfd3424b23 100644 --- 
a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -32,6 +32,12 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +/** + * RPC lifecycle tracer. + * + *

    It hooks into both gRPC RPC lifecycle and this application. It combines the extracted {@link + * CallLabels} with {@link Metrics} recording. + */ public class Tracer extends ClientStreamTracer { private static final Key CALL_OPTION_KEY = Key.create("bigtable-proxy-tracer"); diff --git a/bigtable/bigtable-proxy/src/main/resources/logback.xml b/bigtable/bigtable-proxy/src/main/resources/logback.xml index 4b19e5c3773..b2f4edd122e 100644 --- a/bigtable/bigtable-proxy/src/main/resources/logback.xml +++ b/bigtable/bigtable-proxy/src/main/resources/logback.xml @@ -8,6 +8,13 @@ + + + + + + + diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java similarity index 100% rename from bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java rename to bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java From 5698028ab13d37bda4b83ca47888d7a2b7a7bad2 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 10:50:22 -0500 Subject: [PATCH 46/66] feat: add jitter to anti idle task (#9751) NOTE: this depends on and includes https://togithub.com/GoogleCloudPlatform/java-docs-samples/pull/9750 --- .../proxy/channelpool/DataChannel.java | 53 ++++++++++++++++--- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index c61db5ea3f8..1a04ceb209c 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -38,8 +38,10 
@@ import io.grpc.MethodDescriptor; import io.grpc.Status; import io.grpc.StatusRuntimeException; +import java.time.Duration; import java.util.List; import java.util.Optional; +import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -51,18 +53,24 @@ /** * Decorator for a Bigtable data plane connection to add channel warming via PingAndWarm. Channel - * warming will happen on creation and then every 3 minutes. + * warming will happen on creation and then every 3 minutes (with jitter). */ public class DataChannel extends ManagedChannel { private static final Logger LOGGER = LoggerFactory.getLogger(DataChannel.class); + private static final Duration WARM_PERIOD = Duration.ofMinutes(3); + private static final Duration MAX_JITTER = Duration.ofSeconds(10); + + private final Random random = new Random(); private final ManagedChannel inner; private final Metrics metrics; private final ResourceCollector resourceCollector; private final CallCredentials callCredentials; - private final ScheduledFuture antiIdleTask; + private final ScheduledExecutorService warmingExecutor; + private volatile ScheduledFuture antiIdleTask; private final AtomicBoolean closed = new AtomicBoolean(); + private final Object scheduleLock = new Object(); public DataChannel( ResourceCollector resourceCollector, @@ -83,6 +91,8 @@ public DataChannel( .keepAliveTime(30, TimeUnit.SECONDS) .keepAliveTimeout(10, TimeUnit.SECONDS) .build(); + + this.warmingExecutor = warmingExecutor; this.metrics = metrics; try { @@ -96,16 +106,30 @@ public DataChannel( throw e; } - antiIdleTask = warmingExecutor.scheduleAtFixedRate(this::warmQuietly, 3, 3, TimeUnit.MINUTES); + antiIdleTask = + warmingExecutor.schedule(this::warmTask, nextWarmup().toMillis(), TimeUnit.MILLISECONDS); metrics.updateChannelCount(1); } - private void warmQuietly() { + private Duration nextWarmup() { + return WARM_PERIOD.minus( + 
Duration.ofMillis((long) (MAX_JITTER.toMillis() * random.nextDouble()))); + } + + private void warmTask() { try { warm(); } catch (RuntimeException e) { LOGGER.warn("anti idle ping failed, forcing reconnect", e); inner.enterIdle(); + } finally { + synchronized (scheduleLock) { + if (!closed.get()) { + antiIdleTask = + warmingExecutor.schedule( + this::warmTask, nextWarmup().toMillis(), TimeUnit.MILLISECONDS); + } + } } } @@ -204,10 +228,16 @@ public void onClose(Status status, Metadata trailers) { @Override public ManagedChannel shutdown() { - if (closed.compareAndSet(false, true)) { + final boolean closing; + + synchronized (scheduleLock) { + closing = closed.compareAndSet(false, true); + antiIdleTask.cancel(true); + } + if (closing) { metrics.updateChannelCount(-1); } - antiIdleTask.cancel(true); + return inner.shutdown(); } @@ -223,10 +253,17 @@ public boolean isTerminated() { @Override public ManagedChannel shutdownNow() { - if (closed.compareAndSet(false, true)) { + final boolean closing; + + synchronized (scheduleLock) { + closing = closed.compareAndSet(false, true); + antiIdleTask.cancel(true); + } + + if (closing) { metrics.updateChannelCount(-1); } - antiIdleTask.cancel(true); + return inner.shutdownNow(); } From 89865ffbdf037d70574825dd120d002a95b52cfa Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 10:55:42 -0500 Subject: [PATCH 47/66] fix: optimize channel pool settings to allow pool to down size with anti idle pings (#9752) * chore: misc non-functional cleanup * feat: add jitter to anti idle task * fix: optimize channel pool settings to allow pool to down size with anti idle pings --- .../cloud/bigtable/examples/proxy/commands/Serve.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java index 
803c2de9bf4..797c861632d 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/commands/Serve.java @@ -115,12 +115,12 @@ void start() throws IOException { ResourceCollector resourceCollector = new ResourceCollector(); refreshExecutor = Executors.newSingleThreadScheduledExecutor(); - // From - // https://github.com/googleapis/java-bigtable/blob/e0ce2fe3c1207731d15e56faec66ba099652b87c/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java#L406-L410 ChannelPoolSettings poolSettings = ChannelPoolSettings.builder() .setInitialChannelCount(10) - .setMinRpcsPerChannel(1) + .setMinChannelCount(2) + .setMaxChannelCount(20) + .setMinRpcsPerChannel(5) .setMaxRpcsPerChannel(50) .setPreemptiveRefreshEnabled(true) .build(); From 371ba8f70517000307f26ae80db63c678729f816 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 11:04:20 -0500 Subject: [PATCH 48/66] feat: bigtable-proxy add gfe debug headers (#9753) --- .../proxy/channelpool/DataChannel.java | 66 +++++++++++++++++-- 1 file changed, 59 insertions(+), 7 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index 1a04ceb209c..988258ea3a4 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -35,9 +35,9 @@ import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.Metadata; +import io.grpc.Metadata.Key; import io.grpc.MethodDescriptor; import io.grpc.Status; -import io.grpc.StatusRuntimeException; import java.time.Duration; 
import java.util.List; import java.util.Optional; @@ -58,6 +58,11 @@ public class DataChannel extends ManagedChannel { private static final Logger LOGGER = LoggerFactory.getLogger(DataChannel.class); + private static final Metadata.Key GFE_DEBUG_REQ_HEADER = + Key.of("X-Return-Encrypted-Headers", Metadata.ASCII_STRING_MARSHALLER); + private static final Metadata.Key GFE_DEBUG_RESP_HEADER = + Key.of("X-Encrypted-Debug-Headers", Metadata.ASCII_STRING_MARSHALLER); + private static final Duration WARM_PERIOD = Duration.ofMinutes(3); private static final Duration MAX_JITTER = Duration.ofSeconds(10); @@ -152,10 +157,11 @@ private void warm() { future.get(); successCount++; } catch (ExecutionException e) { - // All permenant errors are ignored and treated as a success + // All permanent errors are ignored and treated as a success // The priming request for that generated the error will be dropped - if (e.getCause() instanceof StatusRuntimeException) { - StatusRuntimeException se = (StatusRuntimeException) e.getCause(); + if (e.getCause() instanceof PingAndWarmException) { + PingAndWarmException se = (PingAndWarmException) e.getCause(); + switch (se.getStatus().getCode()) { case INTERNAL: case PERMISSION_DENIED: @@ -168,8 +174,15 @@ private void warm() { default: // noop } + LOGGER.warn( + "Failed to prime channel with request: {}, status: {}, debug response headers: {}", + request, + se.getStatus(), + Optional.ofNullable(se.getDebugHeaders()).orElse("")); + } else { + LOGGER.warn("Unexpected failure priming channel with request: {}", request, e.getCause()); } - LOGGER.warn("Failed to prime channel with request: {}", request, e.getCause()); + failures++; } catch (InterruptedException e) { throw new RuntimeException("Interrupted while priming channel with request: " + request, e); @@ -182,7 +195,9 @@ private void warm() { private ListenableFuture sendPingAndWarm(PrimingKey primingKey) { Metadata metadata = primingKey.composeMetadata(); + metadata.put(GFE_DEBUG_REQ_HEADER, 
"gfe_response_only"); PingAndWarmRequest request = primingKey.composeProto(); + request = request.toBuilder().setName(request.getName()).build(); CallLabels callLabels = CallLabels.create(BigtableGrpc.getPingAndWarmMethod(), metadata); Tracer tracer = new Tracer(metrics, callLabels); @@ -199,6 +214,8 @@ private ListenableFuture sendPingAndWarm(PrimingKey priming SettableFuture f = SettableFuture.create(); call.start( new Listener<>() { + String debugHeaders = null; + @Override public void onMessage(PingAndWarmResponse response) { if (!f.set(response)) { @@ -207,14 +224,22 @@ public void onMessage(PingAndWarmResponse response) { } } + @Override + public void onHeaders(Metadata headers) { + debugHeaders = headers.get(GFE_DEBUG_RESP_HEADER); + } + @Override public void onClose(Status status, Metadata trailers) { tracer.onCallFinished(status); if (status.isOk()) { - f.setException(new IllegalStateException("PingAndWarm was missing a response")); + f.setException( + new PingAndWarmException( + "PingAndWarm was missing a response", debugHeaders, trailers, status)); } else { - f.setException(status.asRuntimeException()); + f.setException( + new PingAndWarmException("PingAndWarm failed", debugHeaders, trailers, status)); } } }, @@ -226,6 +251,33 @@ public void onClose(Status status, Metadata trailers) { return f; } + static class PingAndWarmException extends RuntimeException { + + private final String debugHeaders; + private final Metadata trailers; + private final Status status; + + public PingAndWarmException( + String message, String debugHeaders, Metadata trailers, Status status) { + super(String.format("PingAndWarm failed, status: " + status)); + this.debugHeaders = debugHeaders; + this.trailers = trailers; + this.status = status; + } + + public String getDebugHeaders() { + return debugHeaders; + } + + public Metadata getTrailers() { + return trailers; + } + + public Status getStatus() { + return status; + } + } + @Override public ManagedChannel shutdown() { final 
boolean closing; From 305697cc1245799d107a81b88495cf94962ba3fe Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 11:14:20 -0500 Subject: [PATCH 49/66] feat: add first byte latency metric (#9754) NOTE: this depends on and includes https://togithub.com/GoogleCloudPlatform/java-docs-samples/pull/9753 --- .../bigtable/examples/proxy/metrics/Metrics.java | 2 ++ .../examples/proxy/metrics/MetricsImpl.java | 13 +++++++++++++ .../bigtable/examples/proxy/metrics/Tracer.java | 8 ++++++++ .../examples/proxy/metrics/NoopMetrics.java | 3 +++ 4 files changed, 26 insertions(+) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index d6571989962..c9fbbe3c271 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -41,6 +41,8 @@ public interface Metrics { void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration); + void recordFirstByteLatency(MetricsAttributes attrs, Duration duration); + void updateChannelCount(int delta); interface MetricsAttributes {} diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index a840a8f506d..8a7a44ca18c 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -87,6 +87,7 @@ public class MetricsImpl implements Closeable, Metrics { private final DoubleHistogram clientCredLatencies; private final DoubleHistogram clientQueueLatencies; private final 
DoubleHistogram clientCallLatencies; + private final DoubleHistogram clientCallFirstByteLatencies; private final LongCounter serverCallsStarted; private final LongHistogram requestSizes; private final LongHistogram responseSizes; @@ -195,6 +196,13 @@ private static SdkMeterProvider createMeterProvider(Credentials credentials, Str .setUnit("ms") .build(); + clientCallFirstByteLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "client.first_byte.duration") + .setDescription("Latency from start of request until first response is received") + .setUnit("ms") + .build(); + channelCounter = meter .upDownCounterBuilder(METRIC_PREFIX + "client.channel.count") @@ -306,6 +314,11 @@ public void recordCallLatency(MetricsAttributes attrs, Status status, Duration d numOutstandingRpcs.decrementAndGet(); } + @Override + public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) { + clientCallFirstByteLatencies.record(duration.toMillis(), unwrap(attrs)); + } + @Override public void updateChannelCount(int delta) { channelCounter.add(delta); diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index 3dfd3424b23..b2053a23245 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -106,6 +106,14 @@ public void inboundHeaders(Metadata headers) { d -> metrics.recordGfeLatency(attrs, d), () -> metrics.recordGfeHeaderMissing(attrs)); } + @Override + public void inboundMessage(int seqNo) { + if (seqNo == 0) { + metrics.recordFirstByteLatency( + attrs, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); + } + } + public void onCallFinished(Status status) { grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(attrs, d)); 
metrics.recordResponseSize(attrs, responseSize.get()); diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index 7da029f9ba1..916842e509b 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -51,6 +51,9 @@ public void recordGfeHeaderMissing(MetricsAttributes attrs) {} @Override public void recordCallLatency(MetricsAttributes attrs, Status status, Duration duration) {} + @Override + public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) {} + @Override public void updateChannelCount(int delta) {} } From dcaa152a6fdf1ee8809b0fa3427dcbf54f793bf2 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 11:24:21 -0500 Subject: [PATCH 50/66] feat: add channel state transition counter (#9755) NOTE: this depends on and includes https://togithub.com/GoogleCloudPlatform/java-docs-samples/pull/9754 --- bigtable/bigtable-proxy/README.md | 2 ++ .../proxy/channelpool/DataChannel.java | 18 ++++++++++++ .../examples/proxy/metrics/Metrics.java | 3 ++ .../examples/proxy/metrics/MetricsImpl.java | 28 +++++++++++++++++++ .../examples/proxy/metrics/NoopMetrics.java | 4 +++ 5 files changed, 55 insertions(+) diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md index 48140f77a58..89ac2aeb882 100644 --- a/bigtable/bigtable-proxy/README.md +++ b/bigtable/bigtable-proxy/README.md @@ -46,6 +46,8 @@ in a project your choosing. 
The metrics will be published under the namespace * `bigtableproxy.client.gfe.duration_missing.count` Count of calls missing gfe response headers * `bigtableproxy.client.call.duration` Total duration of how long the outbound call took * `bigtableproxy.client.channel.count` Number of open channels +* `bigtableproxy.client.channel_change_count` Number of channel transitions by previous and next + states. * `bigtableproxy.client.call.max_outstanding_count` Maximum number of concurrent RPCs in a single minute window * `bigtableproxy.presence` Counts number of proxy processes (emit 1 per process). diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java index 988258ea3a4..a2b3dd7fced 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/channelpool/DataChannel.java @@ -100,6 +100,8 @@ public DataChannel( this.warmingExecutor = warmingExecutor; this.metrics = metrics; + new StateTransitionWatcher().run(); + try { warm(); } catch (RuntimeException e) { @@ -366,4 +368,20 @@ public ClientCall newCall( public String authority() { return inner.authority(); } + + class StateTransitionWatcher implements Runnable { + private ConnectivityState prevState = null; + + @Override + public void run() { + if (closed.get()) { + return; + } + + ConnectivityState newState = inner.getState(false); + metrics.recordChannelStateChange(prevState, newState); + prevState = newState; + inner.notifyWhenStateChanged(prevState, this); + } + } } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index 
c9fbbe3c271..ce7f0fe3432 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -18,6 +18,7 @@ import com.google.cloud.bigtable.examples.proxy.core.CallLabels; import com.google.cloud.bigtable.examples.proxy.metrics.Metrics.MetricsAttributes; +import io.grpc.ConnectivityState; import io.grpc.Status; import java.time.Duration; @@ -45,5 +46,7 @@ public interface Metrics { void updateChannelCount(int delta); + void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState); + interface MetricsAttributes {} } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 8a7a44ca18c..a65bd6e6f53 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -24,6 +24,7 @@ import com.google.cloud.opentelemetry.metric.MetricConfiguration; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import io.grpc.ConnectivityState; import io.grpc.Status; import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.AttributeKey; @@ -51,6 +52,7 @@ import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; @@ -76,6 +78,11 @@ public class MetricsImpl implements Closeable, Metrics { private static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); private static final AttributeKey STATUS_KEY = 
AttributeKey.stringKey("status"); + private static final AttributeKey PREV_CHANNEL_STATE = + AttributeKey.stringKey("prev_state"); + private static final AttributeKey CURRENT_CHANNEL_STATE = + AttributeKey.stringKey("current_state"); + private static final String METRIC_PRESENCE_NAME = METRIC_PREFIX + "presence"; private static final String METRIC_PRESENCE_DESC = "Number of proxy processes"; private static final String METRIC_PRESENCE_UNIT = "{process}"; @@ -91,6 +98,7 @@ public class MetricsImpl implements Closeable, Metrics { private final LongCounter serverCallsStarted; private final LongHistogram requestSizes; private final LongHistogram responseSizes; + private final LongCounter channelStateChangeCounter; private final ObservableLongGauge outstandingRpcCountGauge; private final ObservableLongGauge presenceGauge; @@ -225,6 +233,13 @@ private static SdkMeterProvider createMeterProvider(Credentials credentials, Str .setUnit(METRIC_PRESENCE_UNIT) .ofLongs() .buildWithCallback(o -> o.record(1)); + + channelStateChangeCounter = + meter + .counterBuilder(METRIC_PREFIX + "client.channel_change_count") + .setDescription("Counter of channel state transitions") + .setUnit("{change}") + .build(); } @Override @@ -324,6 +339,19 @@ public void updateChannelCount(int delta) { channelCounter.add(delta); } + @Override + public void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState) { + Attributes attributes = + Attributes.builder() + .put( + PREV_CHANNEL_STATE, Optional.ofNullable(prevState).map(Enum::name).orElse("")) + .put( + CURRENT_CHANNEL_STATE, + Optional.ofNullable(newState).map(Enum::name).orElse("")) + .build(); + channelStateChangeCounter.add(1, attributes); + } + private static Attributes unwrap(MetricsAttributes wrapped) { return ((MetricsAttributesImpl) wrapped).getAttributes(); } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java 
b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index 916842e509b..df99b56d335 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.metrics; import com.google.cloud.bigtable.examples.proxy.core.CallLabels; +import io.grpc.ConnectivityState; import io.grpc.Status; import java.time.Duration; @@ -56,4 +57,7 @@ public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) { @Override public void updateChannelCount(int delta) {} + + @Override + public void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState) {} } From 40877f98b5fc4a616d9afa0fb86731ee1b1429e5 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 11:34:25 -0500 Subject: [PATCH 51/66] chore: capture sub-ms latency measurements (#9789) --- .../examples/proxy/metrics/MetricsImpl.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index a65bd6e6f53..8c6f0d6b61e 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -292,12 +292,12 @@ public void recordCallStarted(MetricsAttributes attrs) { public void recordCredLatency(MetricsAttributes attrs, Status status, Duration duration) { Attributes attributes = unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); - clientCredLatencies.record(duration.toMillis(), attributes); + 
clientCredLatencies.record(toMs(duration), attributes); } @Override public void recordQueueLatency(MetricsAttributes attrs, Duration duration) { - clientQueueLatencies.record(duration.toMillis(), unwrap(attrs)); + clientQueueLatencies.record(toMs(duration), unwrap(attrs)); } @Override @@ -312,7 +312,7 @@ public void recordResponseSize(MetricsAttributes attrs, long size) { @Override public void recordGfeLatency(MetricsAttributes attrs, Duration duration) { - gfeLatency.record(duration.toMillis(), unwrap(attrs)); + gfeLatency.record(toMs(duration), unwrap(attrs)); } @Override @@ -325,13 +325,13 @@ public void recordCallLatency(MetricsAttributes attrs, Status status, Duration d Attributes attributes = unwrap(attrs).toBuilder().put(STATUS_KEY, status.getCode().name()).build(); - clientCallLatencies.record(duration.toMillis(), attributes); + clientCallLatencies.record(toMs(duration), attributes); numOutstandingRpcs.decrementAndGet(); } @Override public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) { - clientCallFirstByteLatencies.record(duration.toMillis(), unwrap(attrs)); + clientCallFirstByteLatencies.record(toMs(duration), unwrap(attrs)); } @Override @@ -352,6 +352,10 @@ public void recordChannelStateChange(ConnectivityState prevState, ConnectivitySt channelStateChangeCounter.add(1, attributes); } + private static double toMs(Duration duration) { + return duration.toNanos() / 1_000_000.0; + } + private static Attributes unwrap(MetricsAttributes wrapped) { return ((MetricsAttributesImpl) wrapped).getAttributes(); } From 398933fa4143aa208c1598b41dbd128892fa1e16 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 3 Dec 2024 11:44:25 -0500 Subject: [PATCH 52/66] feat: add downstream write latency to bigtableproxy (#9790) NOTE: this depends on and includes https://togithub.com/GoogleCloudPlatform/java-docs-samples/pull/9789 --- bigtable/bigtable-proxy/README.md | 2 ++ .../bigtable/examples/proxy/core/CallProxy.java | 8 ++++++++ 
.../bigtable/examples/proxy/metrics/Metrics.java | 2 ++ .../examples/proxy/metrics/MetricsImpl.java | 15 +++++++++++++++ .../bigtable/examples/proxy/metrics/Tracer.java | 6 ++++++ .../examples/proxy/metrics/NoopMetrics.java | 3 +++ 6 files changed, 36 insertions(+) diff --git a/bigtable/bigtable-proxy/README.md b/bigtable/bigtable-proxy/README.md index 89ac2aeb882..d3e7b4d916e 100644 --- a/bigtable/bigtable-proxy/README.md +++ b/bigtable/bigtable-proxy/README.md @@ -45,6 +45,8 @@ in a project your choosing. The metrics will be published under the namespace Cloud Bigtable service. * `bigtableproxy.client.gfe.duration_missing.count` Count of calls missing gfe response headers * `bigtableproxy.client.call.duration` Total duration of how long the outbound call took +* `bigtableproxy.server.write_wait.duration` Total amount of time spent waiting for the downstream + client to be ready for data. * `bigtableproxy.client.channel.count` Number of open channels * `bigtableproxy.client.channel_change_count` Number of channel transitions by previous and next states. 
diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java index 8e4c97db0b7..6285bc5896f 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/core/CallProxy.java @@ -17,6 +17,7 @@ package com.google.cloud.bigtable.examples.proxy.core; import com.google.cloud.bigtable.examples.proxy.metrics.Tracer; +import com.google.common.base.Stopwatch; import io.grpc.ClientCall; import io.grpc.Metadata; import io.grpc.ServerCall; @@ -30,6 +31,8 @@ class CallProxy { final RequestProxy serverCallListener; final ResponseProxy clientCallListener; + private final Stopwatch downstreamStopwatch = Stopwatch.createUnstarted(); + /** * @param tracer a lifecycle observer to publish metrics. * @param serverCall the incoming server call. This will be triggered a customer client. @@ -157,6 +160,7 @@ public void onMessage(RespT message) { // The incoming call is not ready for more responses. Stop requesting additional data // and wait for it to catch up. needToRequest = true; + downstreamStopwatch.reset().start(); } } } @@ -169,6 +173,10 @@ public void onReady() { // Called from RequestProxy, which is a different thread than the ClientCall.Listener // callbacks. 
synchronized void onServerReady() { + if (downstreamStopwatch.isRunning()) { + tracer.onDownstreamLatency(downstreamStopwatch.elapsed()); + downstreamStopwatch.stop(); + } if (needToRequest) { serverCallListener.clientCall.request(1); needToRequest = false; diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java index ce7f0fe3432..007d84471e9 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Metrics.java @@ -48,5 +48,7 @@ public interface Metrics { void recordChannelStateChange(ConnectivityState prevState, ConnectivityState newState); + void recordDownstreamLatency(MetricsAttributes attrs, Duration latency); + interface MetricsAttributes {} } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java index 8c6f0d6b61e..a5f9a2ce409 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/MetricsImpl.java @@ -95,6 +95,7 @@ public class MetricsImpl implements Closeable, Metrics { private final DoubleHistogram clientQueueLatencies; private final DoubleHistogram clientCallLatencies; private final DoubleHistogram clientCallFirstByteLatencies; + private final DoubleHistogram downstreamLatencies; private final LongCounter serverCallsStarted; private final LongHistogram requestSizes; private final LongHistogram responseSizes; @@ -211,6 +212,15 @@ private static SdkMeterProvider createMeterProvider(Credentials credentials, Str .setUnit("ms") .build(); + 
downstreamLatencies = + meter + .histogramBuilder(METRIC_PREFIX + "server.write_wait.duration") + .setDescription( + "Total amount of time spent waiting for the downstream client to be" + + " ready for data") + .setUnit("ms") + .build(); + channelCounter = meter .upDownCounterBuilder(METRIC_PREFIX + "client.channel.count") @@ -352,6 +362,11 @@ public void recordChannelStateChange(ConnectivityState prevState, ConnectivitySt channelStateChangeCounter.add(1, attributes); } + @Override + public void recordDownstreamLatency(MetricsAttributes attrs, Duration latency) { + downstreamLatencies.record(toMs(latency), unwrap(attrs)); + } + private static double toMs(Duration duration) { return duration.toNanos() / 1_000_000.0; } diff --git a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java index b2053a23245..b0162ede05f 100644 --- a/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java +++ b/bigtable/bigtable-proxy/src/main/java/com/google/cloud/bigtable/examples/proxy/metrics/Tracer.java @@ -51,6 +51,7 @@ public class Tracer extends ClientStreamTracer { private final Stopwatch stopwatch; private volatile Optional grpcQueueDuration = Optional.empty(); private final AtomicLong responseSize = new AtomicLong(); + private volatile Duration downstreamLatency; public Tracer(Metrics metrics, CallLabels callLabels) { this.metrics = metrics; @@ -116,6 +117,7 @@ public void inboundMessage(int seqNo) { public void onCallFinished(Status status) { grpcQueueDuration.ifPresent(d -> metrics.recordQueueLatency(attrs, d)); + metrics.recordDownstreamLatency(attrs, downstreamLatency); metrics.recordResponseSize(attrs, responseSize.get()); metrics.recordCallLatency( attrs, status, Duration.ofMillis(stopwatch.elapsed(TimeUnit.MILLISECONDS))); @@ -128,4 +130,8 @@ public void onCredentialsFetch(Status 
status, Duration duration) { public CallLabels getCallLabels() { return callLabels; } + + public void onDownstreamLatency(Duration latency) { + downstreamLatency = downstreamLatency.plus(latency); + } } diff --git a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java index df99b56d335..0fb2b33289f 100644 --- a/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java +++ b/bigtable/bigtable-proxy/src/test/java/com/google/cloud/bigtable/examples/proxy/metrics/NoopMetrics.java @@ -55,6 +55,9 @@ public void recordCallLatency(MetricsAttributes attrs, Status status, Duration d @Override public void recordFirstByteLatency(MetricsAttributes attrs, Duration duration) {} + @Override + public void recordDownstreamLatency(MetricsAttributes attrs, Duration latency) {} + @Override public void updateChannelCount(int delta) {} From 40d8dce93e6297e260e4fedb63a43aa047bed953 Mon Sep 17 00:00:00 2001 From: Aaron Abbott Date: Tue, 3 Dec 2024 12:00:45 -0500 Subject: [PATCH 53/66] feat(opencensus)!: remove OpenCensus samples (#9796) * feat(opencensus)!: remove OpenCensus samples * Also delete trace/ dir from CODEOWNERS --- .github/CODEOWNERS | 2 - opencensus/pom.xml | 93 ------------------ .../com/example/opencensus/Quickstart.java | 87 ---------------- trace/pom.xml | 86 ---------------- .../java/com/example/trace/TraceSample.java | 98 ------------------- .../java/com/example/trace/TraceSampleIT.java | 62 ------------ 6 files changed, 428 deletions(-) delete mode 100644 opencensus/pom.xml delete mode 100644 opencensus/src/main/java/com/example/opencensus/Quickstart.java delete mode 100644 trace/pom.xml delete mode 100644 trace/src/main/java/com/example/trace/TraceSample.java delete mode 100644 trace/src/test/java/com/example/trace/TraceSampleIT.java diff --git a/.github/CODEOWNERS 
b/.github/CODEOWNERS index 2a3f9fc7cee..92695687567 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -49,8 +49,6 @@ # DEE Platform Ops (DEEPO) /errorreporting @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers /monitoring @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers -/opencensus @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers -/trace @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers # Cloud SDK Databases & Data Analytics teams # ---* Cloud Native DB diff --git a/opencensus/pom.xml b/opencensus/pom.xml deleted file mode 100644 index dcdb6115781..00000000000 --- a/opencensus/pom.xml +++ /dev/null @@ -1,93 +0,0 @@ - - - - - 4.0.0 - jar - com.example.opencensus - opencensus-samples - 1.0 - - - - com.google.cloud.samples - shared-configuration - 1.2.0 - - - - 1.8 - 1.8 - UTF-8 - 0.31.1 - - - - - - - com.google.cloud - libraries-bom - 26.32.0 - pom - import - - - - - - - - io.opencensus - opencensus-api - ${opencensus.version} - - - io.opencensus - opencensus-exporter-stats-stackdriver - ${opencensus.version} - - - - - - - - org.codehaus.mojo - exec-maven-plugin - 3.1.1 - - - - java - - - - - com.example.opencensus.Quickstart - false - - - - - - diff --git a/opencensus/src/main/java/com/example/opencensus/Quickstart.java b/opencensus/src/main/java/com/example/opencensus/Quickstart.java deleted file mode 100644 index c304bf056dd..00000000000 --- a/opencensus/src/main/java/com/example/opencensus/Quickstart.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.opencensus; - -// [START monitoring_opencensus_metrics_quickstart] - -import com.google.common.collect.Lists; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; -import io.opencensus.stats.Aggregation; -import io.opencensus.stats.BucketBoundaries; -import io.opencensus.stats.Measure.MeasureLong; -import io.opencensus.stats.Stats; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.stats.View; -import io.opencensus.stats.View.Name; -import io.opencensus.stats.ViewManager; -import java.io.IOException; -import java.util.Collections; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -public class Quickstart { - private static final int EXPORT_INTERVAL = 70; - private static final MeasureLong LATENCY_MS = - MeasureLong.create("task_latency", "The task latency in milliseconds", "ms"); - // Latency in buckets: - // [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s] - private static final BucketBoundaries LATENCY_BOUNDARIES = - BucketBoundaries.create(Lists.newArrayList(0d, 100d, 200d, 400d, 1000d, 2000d, 4000d)); - private static final StatsRecorder STATS_RECORDER = Stats.getStatsRecorder(); - - public static void main(String[] args) throws IOException, InterruptedException { - // Register the view. It is imperative that this step exists, - // otherwise recorded metrics will be dropped and never exported. 
- View view = - View.create( - Name.create("task_latency_distribution"), - "The distribution of the task latencies.", - LATENCY_MS, - Aggregation.Distribution.create(LATENCY_BOUNDARIES), - Collections.emptyList()); - - ViewManager viewManager = Stats.getViewManager(); - viewManager.registerView(view); - - // [START setup_exporter] - // Enable OpenCensus exporters to export metrics to Stackdriver Monitoring. - // Exporters use Application Default Credentials to authenticate. - // See https://developers.google.com/identity/protocols/application-default-credentials - // for more details. - StackdriverStatsExporter.createAndRegister(); - // [END setup_exporter] - - // Record 100 fake latency values between 0 and 5 seconds. - Random rand = new Random(); - for (int i = 0; i < 100; i++) { - long ms = (long) (TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS) * rand.nextDouble()); - System.out.println(String.format("Latency %d: %d", i, ms)); - STATS_RECORDER.newMeasureMap().put(LATENCY_MS, ms).record(); - } - - // The default export interval is 60 seconds. The thread with the StackdriverStatsExporter must - // live for at least the interval past any metrics that must be collected, or some risk being - // lost if they are recorded after the last export. 
- - System.out.println( - String.format( - "Sleeping %d seconds before shutdown to ensure all records are flushed.", - EXPORT_INTERVAL)); - Thread.sleep(TimeUnit.MILLISECONDS.convert(EXPORT_INTERVAL, TimeUnit.SECONDS)); - } -} -// [END monitoring_opencensus_metrics_quickstart] diff --git a/trace/pom.xml b/trace/pom.xml deleted file mode 100644 index e8320cf319d..00000000000 --- a/trace/pom.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - 4.0.0 - jar - com.example.trace - trace-samples - 1.0 - - - - com.google.cloud.samples - shared-configuration - 1.2.0 - - - - - 1.8 - 1.8 - UTF-8 - - - - - - com.google.cloud - libraries-bom - 26.32.0 - pom - import - - - - - - - - io.opencensus - opencensus-exporter-trace-stackdriver - 0.31.1 - - - io.grpc - grpc-api - - - - - com.google.cloud - google-cloud-core - - - com.google.api - gax-grpc - - - - - - junit - junit - 4.13.2 - test - - - - diff --git a/trace/src/main/java/com/example/trace/TraceSample.java b/trace/src/main/java/com/example/trace/TraceSample.java deleted file mode 100644 index f706c59278d..00000000000 --- a/trace/src/main/java/com/example/trace/TraceSample.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.trace; - -import com.google.auth.oauth2.AccessToken; -import com.google.auth.oauth2.GoogleCredentials; -import io.opencensus.common.Scope; -import io.opencensus.exporter.trace.stackdriver.StackdriverTraceConfiguration; -import io.opencensus.exporter.trace.stackdriver.StackdriverTraceExporter; -import io.opencensus.trace.Tracer; -import io.opencensus.trace.Tracing; -import io.opencensus.trace.samplers.Samplers; -import java.io.IOException; -import java.time.Instant; -import java.util.Date; - -public class TraceSample { - - // [START trace_setup_java_custom_span] - private static final Tracer tracer = Tracing.getTracer(); - - public static void doWork() { - // Create a child Span of the current Span. - try (Scope ss = tracer.spanBuilder("MyChildWorkSpan").startScopedSpan()) { - doInitialWork(); - tracer.getCurrentSpan().addAnnotation("Finished initial work"); - doFinalWork(); - } - } - - private static void doInitialWork() { - // ... - tracer.getCurrentSpan().addAnnotation("Doing initial work"); - // ... - } - - private static void doFinalWork() { - // ... - tracer.getCurrentSpan().addAnnotation("Hello world!"); - // ... 
- } - // [END trace_setup_java_custom_span] - - // [START trace_setup_java_full_sampling] - public static void doWorkFullSampled() { - try (Scope ss = - tracer - .spanBuilder("MyChildWorkSpan") - .setSampler(Samplers.alwaysSample()) - .startScopedSpan()) { - doInitialWork(); - tracer.getCurrentSpan().addAnnotation("Finished initial work"); - doFinalWork(); - } - } - // [END trace_setup_java_full_sampling] - - // [START trace_setup_java_create_and_register] - public static void createAndRegister() throws IOException { - StackdriverTraceExporter.createAndRegister(StackdriverTraceConfiguration.builder().build()); - } - // [END trace_setup_java_create_and_register] - - // [START trace_setup_java_create_and_register_with_token] - public static void createAndRegisterWithToken(String accessToken) throws IOException { - Date expirationTime = Date.from(Instant.now().plusSeconds(60)); - - GoogleCredentials credentials = - GoogleCredentials.create(new AccessToken(accessToken, expirationTime)); - StackdriverTraceExporter.createAndRegister( - StackdriverTraceConfiguration.builder() - .setProjectId("MyStackdriverProjectId") - .setCredentials(credentials) - .build()); - } - // [END trace_setup_java_create_and_register_with_token] - - // [START trace_setup_java_register_exporter] - public static void createAndRegisterGoogleCloudPlatform(String projectId) throws IOException { - StackdriverTraceExporter.createAndRegister( - StackdriverTraceConfiguration.builder().setProjectId(projectId).build()); - } - // [END trace_setup_java_register_exporter] -} diff --git a/trace/src/test/java/com/example/trace/TraceSampleIT.java b/trace/src/test/java/com/example/trace/TraceSampleIT.java deleted file mode 100644 index b4a09bca316..00000000000 --- a/trace/src/test/java/com/example/trace/TraceSampleIT.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.trace; - -import com.google.common.base.Strings; -import io.opencensus.exporter.trace.stackdriver.StackdriverTraceExporter; -import java.io.IOException; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for stackdriver tracing sample. */ -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class TraceSampleIT { - private static final String CLOUD_PROJECT_KEY = "GOOGLE_CLOUD_PROJECT"; - - @BeforeClass - public static void setup() { - Assert.assertFalse(Strings.isNullOrEmpty(System.getenv(CLOUD_PROJECT_KEY))); - } - - @After - public void tearDown() { - StackdriverTraceExporter.unregister(); - } - - @Test - public void testCreateAndRegister() throws IOException { - TraceSample.createAndRegister(); - TraceSample.doWork(); - } - - @Test - public void testCreateAndRegisterFullSampled() throws IOException { - TraceSample.createAndRegister(); - TraceSample.doWorkFullSampled(); - } - - @Test - public void testCreateAndRegisterGoogleCloudPlatform() throws IOException { - TraceSample.createAndRegisterGoogleCloudPlatform(System.getenv(CLOUD_PROJECT_KEY)); - TraceSample.doWork(); - } -} From 1c923a30eeba442859acfe3b1fdbf0f4ff2e98e8 Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 4 Dec 2024 10:46:05 -0800 Subject: [PATCH 54/66] fix(eventarc): sets protojson to ignoreUnknownFields (#9807) * fix(eventarc): sets protojson to ignoreUnknownFields * 
lint * missing import * Reverting changes in Firestore samples * remove unused import --- .../java/com/example/cloudrun/CloudEventController.java | 9 +++++++-- .../src/main/java/functions/FirebaseFirestore.java | 7 ++++--- .../v2/hello-gcs/src/main/java/functions/HelloGcs.java | 7 ++++++- .../imagemagick/src/main/java/functions/ImageMagick.java | 7 ++++++- .../src/main/java/functions/OcrProcessImage.java | 7 ++++++- 5 files changed, 29 insertions(+), 8 deletions(-) diff --git a/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java b/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java index 103a232bd7d..dc289f7be0c 100644 --- a/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java +++ b/eventarc/storage-handler/src/main/java/com/example/cloudrun/CloudEventController.java @@ -46,7 +46,12 @@ ResponseEntity handleCloudEvent(@RequestBody CloudEvent cloudEvent) String json = new String(cloudEvent.getData().toBytes()); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(json, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. 
+ JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(json, builder); StorageObjectData data = builder.build(); // Convert protobuf timestamp to java Instant @@ -54,7 +59,7 @@ ResponseEntity handleCloudEvent(@RequestBody CloudEvent cloudEvent) Instant updated = Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos()); String msg = String.format( - "Cloud Storage object changed: %s/%s modified at %s\n", + "Cloud Storage object changed: %s/%s modified at %s%n", data.getBucket(), data.getName(), updated); System.out.println(msg); diff --git a/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java b/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java index dfd0acea59f..e79d77a167e 100644 --- a/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java +++ b/functions/v2/firebase/firestore/src/main/java/functions/FirebaseFirestore.java @@ -28,16 +28,17 @@ public class FirebaseFirestore implements CloudEventsFunction { @Override public void accept(CloudEvent event) throws InvalidProtocolBufferException { - DocumentEventData firestorEventData = DocumentEventData.parseFrom(event.getData().toBytes()); + DocumentEventData firestoreEventData = DocumentEventData + .parseFrom(event.getData().toBytes()); logger.info("Function triggered by event on: " + event.getSource()); logger.info("Event type: " + event.getType()); logger.info("Old value:"); - logger.info(firestorEventData.getOldValue().toString()); + logger.info(firestoreEventData.getOldValue().toString()); logger.info("New value:"); - logger.info(firestorEventData.getValue().toString()); + logger.info(firestoreEventData.getValue().toString()); } } diff --git a/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java b/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java index 26c49560d1d..a1b227a90bb 100644 --- a/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java +++ 
b/functions/v2/hello-gcs/src/main/java/functions/HelloGcs.java @@ -40,7 +40,12 @@ public void accept(CloudEvent event) throws InvalidProtocolBufferException { String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. + JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); StorageObjectData data = builder.build(); logger.info("Bucket: " + data.getBucket()); diff --git a/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java b/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java index 90a9fefd81d..546207d26cf 100644 --- a/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java +++ b/functions/v2/imagemagick/src/main/java/functions/ImageMagick.java @@ -156,7 +156,12 @@ private static StorageObjectData getEventData(CloudEvent event) // Extract Cloud Event data and convert to StorageObjectData String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. 
+ JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); return builder.build(); } // [START functions_imagemagick_setup] diff --git a/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java b/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java index f41d76b1b66..05437bc3714 100644 --- a/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java +++ b/functions/v2/ocr/ocr-process-image/src/main/java/functions/OcrProcessImage.java @@ -71,7 +71,12 @@ public void accept(CloudEvent event) throws InvalidProtocolBufferException { // Unmarshal data from CloudEvent String cloudEventData = new String(event.getData().toBytes(), StandardCharsets.UTF_8); StorageObjectData.Builder builder = StorageObjectData.newBuilder(); - JsonFormat.parser().merge(cloudEventData, builder); + + // If you do not ignore unknown fields, then JsonFormat.Parser returns an + // error when encountering a new or unknown field. Note that you might lose + // some event data in the unmarshaling process by ignoring unknown fields. 
+ JsonFormat.Parser parser = JsonFormat.parser().ignoringUnknownFields(); + parser.merge(cloudEventData, builder); StorageObjectData gcsEvent = builder.build(); String bucket = gcsEvent.getBucket(); From 50262b74d745ca0f1adafafabcc69b632eb53020 Mon Sep 17 00:00:00 2001 From: Rohith Rajan <129467542+rohithrajan-ai@users.noreply.github.com> Date: Thu, 5 Dec 2024 14:25:36 -0500 Subject: [PATCH 55/66] feat(managedkafka): increase LRO timeout to 1 hour in client samples (#9684) * fix: increase timeout duration in Java client * fix: only specify modified retry settings for LRO methods --- managedkafka/examples/pom.xml | 8 +- .../src/main/java/examples/CreateCluster.java | 40 ++++- .../src/main/java/examples/DeleteCluster.java | 28 +++- .../src/main/java/examples/UpdateCluster.java | 27 +++- .../src/test/java/examples/ClustersTest.java | 142 +++++++++++++++++- 5 files changed, 229 insertions(+), 16 deletions(-) diff --git a/managedkafka/examples/pom.xml b/managedkafka/examples/pom.xml index 248b42e2962..7f1343971b3 100644 --- a/managedkafka/examples/pom.xml +++ b/managedkafka/examples/pom.xml @@ -29,7 +29,7 @@ com.google.cloud libraries-bom - 26.40.0 + 26.50.0 pom import @@ -40,13 +40,7 @@ com.google.cloud google-cloud-managedkafka - 0.1.0 - - com.google.api.grpc - proto-google-cloud-managedkafka-v1 - 0.1.0 - junit junit diff --git a/managedkafka/examples/src/main/java/examples/CreateCluster.java b/managedkafka/examples/src/main/java/examples/CreateCluster.java index 478e6ce7f2b..63c22d30c6a 100644 --- a/managedkafka/examples/src/main/java/examples/CreateCluster.java +++ b/managedkafka/examples/src/main/java/examples/CreateCluster.java @@ -17,7 +17,13 @@ package examples; // [START managedkafka_create_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import 
com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.cloud.managedkafka.v1.AccessConfig; import com.google.cloud.managedkafka.v1.CapacityConfig; import com.google.cloud.managedkafka.v1.Cluster; @@ -25,9 +31,11 @@ import com.google.cloud.managedkafka.v1.GcpConfig; import com.google.cloud.managedkafka.v1.LocationName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.NetworkConfig; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.cloud.managedkafka.v1.RebalanceConfig; +import java.time.Duration; import java.util.concurrent.ExecutionException; public class CreateCluster { @@ -64,17 +72,47 @@ public static void createCluster( .setRebalanceConfig(rebalanceConfig) .build(); - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.createClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { + CreateClusterRequest request = CreateClusterRequest.newBuilder() .setParent(LocationName.of(projectId, region).toString()) .setClusterId(clusterId) .setCluster(cluster) .build(); + // The duration of this operation can vary considerably, typically taking between 10-40 // minutes. OperationFuture future = managedKafkaClient.createClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details. 
+ OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster creation started. Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + + while (!future.isDone()) { + // The pollingFuture gives us the most recent status of the operation + RetryingFuture pollingFuture = future.getPollingFuture(); + OperationSnapshot currentOp = pollingFuture.getAttemptResult().get(); + System.out.printf("Polling Operation:\nName: %s\n Done: %s\n", + currentOp.getName(), + currentOp.isDone()); + } + + // NOTE: future.get() blocks completion until the operation is complete (isDone = True) Cluster response = future.get(); System.out.printf("Created cluster: %s\n", response.getName()); } catch (ExecutionException e) { diff --git a/managedkafka/examples/src/main/java/examples/DeleteCluster.java b/managedkafka/examples/src/main/java/examples/DeleteCluster.java index 391a86fcb41..767ef74a718 100644 --- a/managedkafka/examples/src/main/java/examples/DeleteCluster.java +++ b/managedkafka/examples/src/main/java/examples/DeleteCluster.java @@ -17,14 +17,21 @@ package examples; // [START managedkafka_delete_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.api.gax.rpc.ApiException; import com.google.cloud.managedkafka.v1.ClusterName; import com.google.cloud.managedkafka.v1.DeleteClusterRequest; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.protobuf.Empty; import java.io.IOException; +import java.time.Duration; public class DeleteCluster { @@ -38,13 +45,32 @@ public 
static void main(String[] args) throws Exception { public static void deleteCluster(String projectId, String region, String clusterId) throws Exception { - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + .setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.deleteClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { DeleteClusterRequest request = DeleteClusterRequest.newBuilder() .setName(ClusterName.of(projectId, region, clusterId).toString()) .build(); OperationFuture future = managedKafkaClient.deleteClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details. CreateCluster contains sample code for polling logs. + OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster deletion started. 
Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + future.get(); System.out.println("Deleted cluster"); } catch (IOException | ApiException e) { diff --git a/managedkafka/examples/src/main/java/examples/UpdateCluster.java b/managedkafka/examples/src/main/java/examples/UpdateCluster.java index d33de2283d9..feeff950ffd 100644 --- a/managedkafka/examples/src/main/java/examples/UpdateCluster.java +++ b/managedkafka/examples/src/main/java/examples/UpdateCluster.java @@ -17,14 +17,21 @@ package examples; // [START managedkafka_update_cluster] + import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.longrunning.OperationTimedPollAlgorithm; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.retrying.TimedRetryAlgorithm; import com.google.cloud.managedkafka.v1.CapacityConfig; import com.google.cloud.managedkafka.v1.Cluster; import com.google.cloud.managedkafka.v1.ClusterName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import com.google.cloud.managedkafka.v1.ManagedKafkaSettings; import com.google.cloud.managedkafka.v1.OperationMetadata; import com.google.cloud.managedkafka.v1.UpdateClusterRequest; import com.google.protobuf.FieldMask; +import java.time.Duration; import java.util.concurrent.ExecutionException; public class UpdateCluster { @@ -48,11 +55,29 @@ public static void updateCluster( .build(); FieldMask updateMask = FieldMask.newBuilder().addPaths("capacity_config.memory_bytes").build(); - try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create()) { + // Create the settings to configure the timeout for polling operations + ManagedKafkaSettings.Builder settingsBuilder = ManagedKafkaSettings.newBuilder(); + TimedRetryAlgorithm timedRetryAlgorithm = OperationTimedPollAlgorithm.create( + RetrySettings.newBuilder() + 
.setTotalTimeoutDuration(Duration.ofHours(1L)) + .build()); + settingsBuilder.updateClusterOperationSettings() + .setPollingAlgorithm(timedRetryAlgorithm); + + try (ManagedKafkaClient managedKafkaClient = ManagedKafkaClient.create( + settingsBuilder.build())) { UpdateClusterRequest request = UpdateClusterRequest.newBuilder().setUpdateMask(updateMask).setCluster(cluster).build(); OperationFuture future = managedKafkaClient.updateClusterOperationCallable().futureCall(request); + + // Get the initial LRO and print details. CreateCluster contains sample code for polling logs. + OperationSnapshot operation = future.getInitialFuture().get(); + System.out.printf("Cluster update started. Operation name: %s\nDone: %s\nMetadata: %s\n", + operation.getName(), + operation.isDone(), + future.getMetadata().get().toString()); + Cluster response = future.get(); System.out.printf("Updated cluster: %s\n", response.getName()); } catch (ExecutionException e) { diff --git a/managedkafka/examples/src/test/java/examples/ClustersTest.java b/managedkafka/examples/src/test/java/examples/ClustersTest.java index e24ee8ae837..e5d47e3edbd 100644 --- a/managedkafka/examples/src/test/java/examples/ClustersTest.java +++ b/managedkafka/examples/src/test/java/examples/ClustersTest.java @@ -24,10 +24,21 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.api.core.ApiFuture; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.retrying.RetryingFuture; +import com.google.api.gax.rpc.OperationCallable; import com.google.cloud.managedkafka.v1.Cluster; import com.google.cloud.managedkafka.v1.ClusterName; +import com.google.cloud.managedkafka.v1.CreateClusterRequest; +import com.google.cloud.managedkafka.v1.DeleteClusterRequest; import com.google.cloud.managedkafka.v1.LocationName; import com.google.cloud.managedkafka.v1.ManagedKafkaClient; +import 
com.google.cloud.managedkafka.v1.ManagedKafkaSettings; +import com.google.cloud.managedkafka.v1.OperationMetadata; +import com.google.cloud.managedkafka.v1.UpdateClusterRequest; +import com.google.protobuf.Empty; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.util.ArrayList; @@ -58,11 +69,54 @@ public void setUp() { @Test public void createClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); + try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() -> create()).thenReturn(managedKafkaClient); + + // client creation + mockedStatic.when(() -> create(any(ManagedKafkaSettings.class))) + .thenReturn(managedKafkaClient); + + // operation callable when(managedKafkaClient.createClusterOperationCallable()) - .thenReturn(MockOperationFuture.getOperableCallable()); + .thenReturn(operationCallable); + when(operationCallable.futureCall(any(CreateClusterRequest.class))) + .thenReturn(operationFuture); + + // initial future + ApiFuture initialFuture = mock(ApiFuture.class); + when(operationFuture.getInitialFuture()).thenReturn(initialFuture); + + // Metadata + ApiFuture metadataFuture = mock(ApiFuture.class); + OperationMetadata metadata = mock(OperationMetadata.class); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(metadataFuture.get()).thenReturn(metadata); + + // operation snapshot + OperationSnapshot operationSnapshot = mock(OperationSnapshot.class); + when(operationFuture.getInitialFuture().get()).thenReturn(operationSnapshot); + when(operationSnapshot.getName()) + .thenReturn("projects/test-project/locations/test-location/operations/test-operation"); + when(operationSnapshot.isDone()).thenReturn(false, false, true); + + // polling future + RetryingFuture pollingFuture = 
mock(RetryingFuture.class); + when(operationFuture.getPollingFuture()).thenReturn(pollingFuture); + when(operationFuture.isDone()).thenReturn(false, false, true); + ApiFuture attemptResult = mock(ApiFuture.class); + when(pollingFuture.getAttemptResult()).thenReturn(attemptResult); + when(attemptResult.get()).thenReturn(operationSnapshot); + + // Setup final result + Cluster resultCluster = mock(Cluster.class); + when(operationFuture.get()).thenReturn(resultCluster); + when(resultCluster.getName()).thenReturn(clusterName); + String subnet = "test-subnet"; int cpu = 3; long memory = 3221225472L; @@ -71,6 +125,10 @@ public void createClusterTest() throws Exception { assertThat(output).contains("Created cluster"); assertThat(output).contains(clusterName); verify(managedKafkaClient, times(1)).createClusterOperationCallable(); + verify(operationCallable, times(1)).futureCall(any(CreateClusterRequest.class)); + verify(operationFuture, times(2)).getPollingFuture(); // Verify 2 polling attempts + verify(pollingFuture, times(2)).getAttemptResult(); // Verify 2 attempt results + verify(operationSnapshot, times(3)).isDone(); // 2 polls + 1 initial check } } @@ -122,32 +180,104 @@ public void listClustersTest() throws Exception { @Test public void updateClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); + try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() -> create()).thenReturn(managedKafkaClient); + + // client creation + mockedStatic.when(() -> create(any(ManagedKafkaSettings.class))) + .thenReturn(managedKafkaClient); + + // operation callable when(managedKafkaClient.updateClusterOperationCallable()) - .thenReturn(MockOperationFuture.getOperableCallable()); + .thenReturn(operationCallable); + 
when(operationCallable.futureCall(any(UpdateClusterRequest.class))) + .thenReturn(operationFuture); + + // initial future + ApiFuture initialFuture = mock(ApiFuture.class); + when(operationFuture.getInitialFuture()).thenReturn(initialFuture); + + // Metadata + ApiFuture metadataFuture = mock(ApiFuture.class); + OperationMetadata metadata = mock(OperationMetadata.class); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(metadataFuture.get()).thenReturn(metadata); + + // operation snapshot + OperationSnapshot operationSnapshot = mock(OperationSnapshot.class); + when(operationFuture.getInitialFuture().get()).thenReturn(operationSnapshot); + when(operationSnapshot.getName()) + .thenReturn("projects/test-project/locations/test-location/operations/test-operation"); + when(operationSnapshot.isDone()).thenReturn(false, false, true); + + // Setup final result + Cluster resultCluster = mock(Cluster.class); + when(operationFuture.get()).thenReturn(resultCluster); + when(resultCluster.getName()).thenReturn(clusterName); + long updatedMemory = 4221225472L; UpdateCluster.updateCluster(projectId, region, projectId, updatedMemory); String output = bout.toString(); assertThat(output).contains("Updated cluster"); assertThat(output).contains(clusterName); verify(managedKafkaClient, times(1)).updateClusterOperationCallable(); + verify(operationCallable, times(1)).futureCall(any(UpdateClusterRequest.class)); } } @Test public void deleteClusterTest() throws Exception { ManagedKafkaClient managedKafkaClient = mock(ManagedKafkaClient.class); + OperationCallable operationCallable = + mock(OperationCallable.class); + OperationFuture operationFuture = + mock(OperationFuture.class); try (MockedStatic mockedStatic = Mockito.mockStatic(ManagedKafkaClient.class)) { - mockedStatic.when(() -> create()).thenReturn(managedKafkaClient); + + // client creation + mockedStatic.when(() -> create(any(ManagedKafkaSettings.class))) + .thenReturn(managedKafkaClient); + + // operation 
callable when(managedKafkaClient.deleteClusterOperationCallable()) - .thenReturn(MockDeleteOperationFuture.getOperableCallable()); + .thenReturn(operationCallable); + when(operationCallable.futureCall(any(DeleteClusterRequest.class))) + .thenReturn(operationFuture); + + // initial future + ApiFuture initialFuture = mock(ApiFuture.class); + when(operationFuture.getInitialFuture()).thenReturn(initialFuture); + + // Metadata + ApiFuture metadataFuture = mock(ApiFuture.class); + OperationMetadata metadata = mock(OperationMetadata.class); + when(operationFuture.getMetadata()).thenReturn(metadataFuture); + when(metadataFuture.get()).thenReturn(metadata); + + // operation snapshot + OperationSnapshot operationSnapshot = mock(OperationSnapshot.class); + when(operationFuture.getInitialFuture().get()).thenReturn(operationSnapshot); + when(operationSnapshot.getName()) + .thenReturn("projects/test-project/locations/test-location/operations/test-operation"); + when(operationSnapshot.isDone()).thenReturn(false, false, true); + + // Setup final result + Cluster resultCluster = mock(Cluster.class); + when(operationFuture.get()).thenReturn(Empty.getDefaultInstance()); + when(resultCluster.getName()).thenReturn(clusterName); + DeleteCluster.deleteCluster(projectId, region, clusterId); String output = bout.toString(); assertThat(output).contains("Deleted cluster"); + verify(managedKafkaClient, times(1)).deleteClusterOperationCallable(); + verify(operationCallable, times(1)).futureCall(any(DeleteClusterRequest.class)); } } } From bb0108cf612e4ed6fce3f27986eca3e0677c1e68 Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Thu, 5 Dec 2024 15:16:05 -0600 Subject: [PATCH 56/66] chore(automl_vision): delete sample automl_vision_classification_deploy_model (#9808) --- .../automl/ClassificationDeployModel.java | 60 ------------------- .../automl/ClassificationDeployModelIT.java | 14 ----- 2 files changed, 74 deletions(-) delete mode 100644 
automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java diff --git a/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java b/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java deleted file mode 100644 index 63da52ead0d..00000000000 --- a/automl/src/main/java/com/google/cloud/vision/samples/automl/ClassificationDeployModel.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2019 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.vision.samples.automl; - -// [START automl_vision_classification_deploy_model] -import com.google.api.gax.longrunning.OperationFuture; -import com.google.cloud.automl.v1beta1.AutoMlClient; -import com.google.cloud.automl.v1beta1.DeployModelRequest; -import com.google.cloud.automl.v1beta1.ModelName; -import com.google.cloud.automl.v1beta1.OperationMetadata; -import com.google.protobuf.Empty; -import java.io.IOException; -import java.util.concurrent.ExecutionException; - -class ClassificationDeployModel { - - // Deploy a model - static void classificationDeployModel(String projectId, String modelId) - throws IOException, ExecutionException, InterruptedException { - // String projectId = "YOUR_PROJECT_ID"; - // String modelId = "YOUR_MODEL_ID"; - - // Initialize client that will be used to send requests. 
This client only needs to be created - // once, and can be reused for multiple requests. After completing all of your requests, call - // the "close" method on the client to safely clean up any remaining background resources. - try (AutoMlClient client = AutoMlClient.create()) { - - // Get the full path of the model. - ModelName modelFullId = ModelName.of(projectId, "us-central1", modelId); - - // Build deploy model request. - DeployModelRequest deployModelRequest = - DeployModelRequest.newBuilder().setName(modelFullId.toString()).build(); - - // Deploy a model with the deploy model request. - OperationFuture future = - client.deployModelAsync(deployModelRequest); - - future.get(); - - // Display the deployment details of model. - System.out.println("Model deployment finished"); - } - } -} -// [END automl_vision_classification_deploy_model] diff --git a/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java b/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java index 50095202fa3..7431265fa11 100644 --- a/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java +++ b/automl/src/test/java/com/google/cloud/vision/samples/automl/ClassificationDeployModelIT.java @@ -45,20 +45,6 @@ public void tearDown() { System.setOut(null); } - @Test - public void testClassificationDeployModelApi() { - // As model deployment can take a long time, instead try to deploy a - // nonexistent model and confirm that the model was not found, but other - // elements of the request were valid. 
- try { - ClassificationDeployModel.classificationDeployModel(PROJECT_ID, MODEL_ID); - String got = bout.toString(); - assertThat(got).contains("The model does not exist"); - } catch (IOException | ExecutionException | InterruptedException e) { - assertThat(e.getMessage()).contains("The model does not exist"); - } - } - @Test public void testClassificationDeployModelNodeCountApi() { // As model deployment can take a long time, instead try to deploy a From 80b46d6b5dec4094390562e456a0488d5452400c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Mon, 9 Dec 2024 01:45:09 +0100 Subject: [PATCH 57/66] feat(tpu): add tpu vm list sample. (#9606) Implemented tpu_vm_list sample, created test --- tpu/src/main/java/tpu/ListTpuVms.java | 52 +++++++++++++++++++++++++++ tpu/src/test/java/tpu/TpuVmIT.java | 26 ++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 tpu/src/main/java/tpu/ListTpuVms.java diff --git a/tpu/src/main/java/tpu/ListTpuVms.java b/tpu/src/main/java/tpu/ListTpuVms.java new file mode 100644 index 00000000000..b9d834b758e --- /dev/null +++ b/tpu/src/main/java/tpu/ListTpuVms.java @@ -0,0 +1,52 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +//[START tpu_vm_list] +import com.google.cloud.tpu.v2.ListNodesRequest; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; + +public class ListTpuVms { + + public static void main(String[] args) throws IOException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // The zone where the TPUs are located. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-f"; + + listTpuVms(projectId, zone); + } + + // Lists TPU VMs in the specified zone. + public static TpuClient.ListNodesPage listTpuVms(String projectId, String zone) + throws IOException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + String parent = String.format("projects/%s/locations/%s", projectId, zone); + + ListNodesRequest request = ListNodesRequest.newBuilder().setParent(parent).build(); + + return tpuClient.listNodes(request).getPage(); + } + } +} +//[END tpu_vm_list] diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java index a640953c445..12b7bd470c8 100644 --- a/tpu/src/test/java/tpu/TpuVmIT.java +++ b/tpu/src/test/java/tpu/TpuVmIT.java @@ -30,12 +30,15 @@ import com.google.cloud.tpu.v2.CreateNodeRequest; import com.google.cloud.tpu.v2.DeleteNodeRequest; import com.google.cloud.tpu.v2.GetNodeRequest; +import com.google.cloud.tpu.v2.ListNodesRequest; import com.google.cloud.tpu.v2.Node; import com.google.cloud.tpu.v2.TpuClient; import com.google.cloud.tpu.v2.TpuSettings; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.util.Arrays; +import java.util.List; import 
java.util.concurrent.ExecutionException; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -140,4 +143,27 @@ public void testCreateTpuVmWithTopologyFlag() assertEquals(returnedNode, mockNode); } } + + @Test + public void testListTpuVm() throws IOException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + Node mockNode1 = mock(Node.class); + Node mockNode2 = mock(Node.class); + List mockListNodes = Arrays.asList(mockNode1, mockNode2); + TpuClient mockTpuClient = mock(TpuClient.class); + TpuClient.ListNodesPagedResponse mockListNodesResponse = + mock(TpuClient.ListNodesPagedResponse.class); + TpuClient.ListNodesPage mockListNodesPage = mock(TpuClient.ListNodesPage.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockTpuClient); + when(mockTpuClient.listNodes(any(ListNodesRequest.class))).thenReturn(mockListNodesResponse); + when(mockListNodesResponse.getPage()).thenReturn(mockListNodesPage); + when(mockListNodesPage.getValues()).thenReturn(mockListNodes); + + TpuClient.ListNodesPage returnedListNodes = ListTpuVms.listTpuVms(PROJECT_ID, ZONE); + + assertThat(returnedListNodes.getValues()).isEqualTo(mockListNodes); + verify(mockTpuClient, times(1)).listNodes(any(ListNodesRequest.class)); + } + } } \ No newline at end of file From 437d4c7276d11c7d52b734f5420149f8a9fb0809 Mon Sep 17 00:00:00 2001 From: "eapl.mx" <64097272+eapl-gemugami@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:49:07 -0600 Subject: [PATCH 58/66] chore(language): delete sample language_install_with_bom (#9794) * chore(language): delete sample language_install_with_bom * chore(language): revert 'com.google.cloud' dependency originally in the removed region * Revert "chore(language): delete sample language_install_with_bom" This reverts commit 729e69612e8734ea1c0400520f1677865ff2ea74. 
--- language/snippets/pom.xml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/language/snippets/pom.xml b/language/snippets/pom.xml index 5914ee501c2..561349ff7ce 100644 --- a/language/snippets/pom.xml +++ b/language/snippets/pom.xml @@ -23,8 +23,6 @@ UTF-8 - - @@ -42,8 +40,6 @@ com.google.cloud google-cloud-language - - junit junit From 7070190202d078e4618350037d28e776aa8e8e2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Mon, 9 Dec 2024 22:26:23 +0100 Subject: [PATCH 59/66] feat(tpu): add tpu vm stop/start samples. (#9607) * Changed package, added information to CODEOWNERS * Added information to CODEOWNERS * Added timeout * Fixed parameters for test * Fixed DeleteTpuVm and naming * Added comment, created Util class * Fixed naming * Fixed whitespace * Split PR into smaller, deleted redundant code * Implemented tpu_vm_stop and tpu_vm_start samples, created tests * Changed zone * Fixed empty lines and tests, deleted cleanup method * Fixed tests * Fixed comment --- tpu/src/main/java/tpu/GetTpuVm.java | 2 +- tpu/src/main/java/tpu/StartTpuVm.java | 58 ++++++++++++++++++++++++++ tpu/src/main/java/tpu/StopTpuVm.java | 59 +++++++++++++++++++++++++++ tpu/src/test/java/tpu/TpuVmIT.java | 44 ++++++++++++++++++++ 4 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 tpu/src/main/java/tpu/StartTpuVm.java create mode 100644 tpu/src/main/java/tpu/StopTpuVm.java diff --git a/tpu/src/main/java/tpu/GetTpuVm.java b/tpu/src/main/java/tpu/GetTpuVm.java index b1d6608b5b4..6dc40f4150e 100644 --- a/tpu/src/main/java/tpu/GetTpuVm.java +++ b/tpu/src/main/java/tpu/GetTpuVm.java @@ -27,7 +27,7 @@ public class GetTpuVm { public static void main(String[] args) throws IOException { // TODO(developer): Replace these variables before running the sample. 
- // Project ID or project number of the Google Cloud project you want to create a node. + // Project ID or project number of the Google Cloud project you want to use. String projectId = "YOUR_PROJECT_ID"; // The zone in which to create the TPU. // For more information about supported TPU types for specific zones, diff --git a/tpu/src/main/java/tpu/StartTpuVm.java b/tpu/src/main/java/tpu/StartTpuVm.java new file mode 100644 index 00000000000..16546a78bf5 --- /dev/null +++ b/tpu/src/main/java/tpu/StartTpuVm.java @@ -0,0 +1,58 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tpu; + +//[START tpu_vm_start] +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.StartNodeRequest; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class StartTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // The zone where the TPU is located. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-f"; + // The name for your TPU. 
+ String nodeName = "YOUR_TPU_NAME"; + + startTpuVm(projectId, zone, nodeName); + } + + // Starts a TPU VM with the specified name in the given project and zone. + public static Node startTpuVm(String projectId, String zone, String nodeName) + throws IOException, ExecutionException, InterruptedException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (TpuClient tpuClient = TpuClient.create()) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + StartNodeRequest request = StartNodeRequest.newBuilder().setName(name).build(); + + return tpuClient.startNodeAsync(request).get(); + } + } +} +//[END tpu_vm_start] \ No newline at end of file diff --git a/tpu/src/main/java/tpu/StopTpuVm.java b/tpu/src/main/java/tpu/StopTpuVm.java new file mode 100644 index 00000000000..ccaf668e889 --- /dev/null +++ b/tpu/src/main/java/tpu/StopTpuVm.java @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tpu; + +//[START tpu_vm_stop] +import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.NodeName; +import com.google.cloud.tpu.v2.StopNodeRequest; +import com.google.cloud.tpu.v2.TpuClient; +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +public class StopTpuVm { + + public static void main(String[] args) + throws IOException, ExecutionException, InterruptedException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Google Cloud project you want to use. + String projectId = "YOUR_PROJECT_ID"; + // The zone where the TPU is located. + // For more information about supported TPU types for specific zones, + // see https://cloud.google.com/tpu/docs/regions-zones + String zone = "us-central1-f"; + // The name for your TPU. + String nodeName = "YOUR_TPU_NAME"; + + stopTpuVm(projectId, zone, nodeName); + } + + // Stops a TPU VM with the specified name in the given project and zone. + public static Node stopTpuVm(String projectId, String zone, String nodeName) + throws IOException, ExecutionException, InterruptedException { + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. 
+ try (TpuClient tpuClient = TpuClient.create()) { + String name = NodeName.of(projectId, zone, nodeName).toString(); + + StopNodeRequest request = StopNodeRequest.newBuilder().setName(name).build(); + + return tpuClient.stopNodeAsync(request).get(); + } + } +} +//[END tpu_vm_stop] + diff --git a/tpu/src/test/java/tpu/TpuVmIT.java b/tpu/src/test/java/tpu/TpuVmIT.java index 12b7bd470c8..e40e220fec8 100644 --- a/tpu/src/test/java/tpu/TpuVmIT.java +++ b/tpu/src/test/java/tpu/TpuVmIT.java @@ -32,6 +32,8 @@ import com.google.cloud.tpu.v2.GetNodeRequest; import com.google.cloud.tpu.v2.ListNodesRequest; import com.google.cloud.tpu.v2.Node; +import com.google.cloud.tpu.v2.StartNodeRequest; +import com.google.cloud.tpu.v2.StopNodeRequest; import com.google.cloud.tpu.v2.TpuClient; import com.google.cloud.tpu.v2.TpuSettings; import java.io.ByteArrayOutputStream; @@ -166,4 +168,46 @@ public void testListTpuVm() throws IOException { verify(mockTpuClient, times(1)).listNodes(any(ListNodesRequest.class)); } } + + @Test + public void testStartTpuVm() throws IOException, ExecutionException, InterruptedException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + TpuClient mockClient = mock(TpuClient.class); + Node mockNode = mock(Node.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); + when(mockClient.startNodeAsync(any(StartNodeRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockNode); + + Node returnedNode = StartTpuVm.startTpuVm(PROJECT_ID, ZONE, NODE_NAME); + + verify(mockClient, times(1)) + .startNodeAsync(any(StartNodeRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedNode, mockNode); + } + } + + @Test + public void testStopTpuVm() throws IOException, ExecutionException, InterruptedException { + try (MockedStatic mockedTpuClient = mockStatic(TpuClient.class)) { + TpuClient mockClient = mock(TpuClient.class); + 
Node mockNode = mock(Node.class); + OperationFuture mockFuture = mock(OperationFuture.class); + + mockedTpuClient.when(TpuClient::create).thenReturn(mockClient); + when(mockClient.stopNodeAsync(any(StopNodeRequest.class))) + .thenReturn(mockFuture); + when(mockFuture.get()).thenReturn(mockNode); + + Node returnedNode = StopTpuVm.stopTpuVm(PROJECT_ID, ZONE, NODE_NAME); + + verify(mockClient, times(1)) + .stopNodeAsync(any(StopNodeRequest.class)); + verify(mockFuture, times(1)).get(); + assertEquals(returnedNode, mockNode); + } + } } \ No newline at end of file From 1130203f96a9c214b80056a9f37e41c279fec239 Mon Sep 17 00:00:00 2001 From: Jack Wotherspoon Date: Tue, 10 Dec 2024 15:28:45 -0500 Subject: [PATCH 60/66] chore: Update CODEOWNERS (#9853) Cloud SQL samples ownership has moved from Cloud SDK back to Cloud SQL and as such the `infra-db-sdk` team should instead be `cloud-sql-connectors` Will need someone to give write access to @GoogleCloudPlatform/cloud-sql-connectors on this repo for the CODEOWNERS file to be happy --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 92695687567..a16c4cc85a5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -59,7 +59,7 @@ /storage @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-storage-dpes /storage-transfer @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-storage-dpes # ---* Infra DB -/cloud-sql @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/infra-db-sdk +/cloud-sql @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver @GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/cloud-sql-connectors # Data & AI /aiplatform @GoogleCloudPlatform/java-samples-reviewers @yoshi-approver 
@GoogleCloudPlatform/cloud-samples-reviewers @GoogleCloudPlatform/text-embedding From 66f1c624488e32c27b2e7b37a88707325c598e65 Mon Sep 17 00:00:00 2001 From: Bo Shi Date: Tue, 10 Dec 2024 12:30:01 -0800 Subject: [PATCH 61/66] feat(routeoptimization): add keepalive gRPC channel example (#9797) --- .../src/main/java/com/example/OptimizeTours.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java b/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java index aacca58c3ab..cff70e3fbe4 100644 --- a/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java +++ b/routeoptimization/snippets/src/main/java/com/example/OptimizeTours.java @@ -22,14 +22,17 @@ package com.example; // [START routeoptimization_v1_OptimizeTours_sync] + import com.google.maps.routeoptimization.v1.OptimizeToursRequest; import com.google.maps.routeoptimization.v1.OptimizeToursResponse; import com.google.maps.routeoptimization.v1.RouteOptimizationClient; +import com.google.maps.routeoptimization.v1.RouteOptimizationSettings; import com.google.maps.routeoptimization.v1.Shipment; import com.google.maps.routeoptimization.v1.Shipment.VisitRequest; import com.google.maps.routeoptimization.v1.ShipmentModel; import com.google.maps.routeoptimization.v1.Vehicle; import com.google.type.LatLng; +import java.time.Duration; public class OptimizeTours { // [END routeoptimization_v1_OptimizeTours_sync] @@ -41,7 +44,16 @@ public static void main(String[] args) throws Exception { // [START routeoptimization_v1_OptimizeTours_sync] public static OptimizeToursResponse optimizeTours(String projectId) throws Exception { - RouteOptimizationClient client = RouteOptimizationClient.create(); + // Optional: method calls that last tens of minutes may be interrupted + // without enabling a short keep-alive interval. 
+ RouteOptimizationSettings clientSettings = RouteOptimizationSettings + .newBuilder() + .setTransportChannelProvider(RouteOptimizationSettings + .defaultGrpcTransportProviderBuilder() + .setKeepAliveTimeDuration(Duration.ofSeconds(30)) + .build()).build(); + + RouteOptimizationClient client = RouteOptimizationClient.create(clientSettings); OptimizeToursRequest request = OptimizeToursRequest.newBuilder() .setParent("projects/" + projectId) From 9b219c01e7e6bf667c4e0d33348f95d156432c0f Mon Sep 17 00:00:00 2001 From: Eric Schmidt Date: Wed, 11 Dec 2024 13:33:59 -0800 Subject: [PATCH 62/66] fix: removes unneeded test, extends timeout (#9841) * fix: removes unneeded test, extends timeout * marking tests as Ignore --- .../cloud/translate/automl/DatasetApiIT.java | 73 ------------------- .../com/example/batch/CreateResourcesIT.java | 14 ++++ .../src/test/java/com/example/batch/Util.java | 2 +- 3 files changed, 15 insertions(+), 74 deletions(-) delete mode 100644 automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java diff --git a/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java b/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java deleted file mode 100644 index bd02bde649e..00000000000 --- a/automl/src/test/java/com/google/cloud/translate/automl/DatasetApiIT.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2018 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.translate.automl; - -import static com.google.common.truth.Truth.assertThat; - -import com.google.api.gax.rpc.NotFoundException; -import io.grpc.StatusRuntimeException; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.concurrent.ExecutionException; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -/** Tests for Automl translation "Dataset API" sample. */ -@Ignore("This test is ignored because the legacy version of AutoML API is deprecated") -@RunWith(JUnit4.class) -@SuppressWarnings("checkstyle:abbreviationaswordinname") -public class DatasetApiIT { - - private static final String PROJECT_ID = "java-docs-samples-testing"; - private static final String BUCKET = PROJECT_ID + "-vcm"; - private static final String COMPUTE_REGION = "us-central1"; - private ByteArrayOutputStream bout; - private PrintStream originalPrintStream; - private String datasetId = "TEN0000000000000000000"; - - @Before - public void setUp() { - bout = new ByteArrayOutputStream(); - PrintStream out = new PrintStream(bout); - originalPrintStream = System.out; - System.setOut(out); - } - - @After - public void tearDown() { - // restores print statements in the original method - System.out.flush(); - System.setOut(originalPrintStream); - } - - @Test - public void testCreateImportDeleteDataset() throws IOException, InterruptedException { - try { - DatasetApi.importData( - PROJECT_ID, COMPUTE_REGION, datasetId, "gs://" + BUCKET + "/en-ja-short.csv"); - String got = bout.toString(); - assertThat(got).contains("The Dataset doesn't exist "); - } catch (NotFoundException | ExecutionException | StatusRuntimeException ex) { - assertThat(ex.getMessage()).contains("The Dataset doesn't exist"); - } - } -} diff --git a/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java 
b/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java index 03e6ab7ba64..8e4f8242e0b 100644 --- a/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java +++ b/batch/snippets/src/test/java/com/example/batch/CreateResourcesIT.java @@ -36,6 +36,7 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -135,6 +136,7 @@ private static void safeDeleteJob(String jobName) { } } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomServiceAccountTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -148,6 +150,7 @@ public void createBatchCustomServiceAccountTest() Assert.assertNotNull(job.getAllocationPolicy().getServiceAccount().getEmail()); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchUsingSecretManager() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -164,6 +167,7 @@ public void createBatchUsingSecretManager() -> taskGroup.getTaskSpec().getEnvironment().containsSecretVariables(variableName))); } + @Ignore("Canceling jobs not yet GA") @Test public void createGpuJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -181,6 +185,7 @@ public void createGpuJobTest() -> instance.getPolicy().getMachineType().contains(machineType))); } + @Ignore("Canceling jobs not yet GA") @Test public void createGpuJobN1Test() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -199,6 +204,7 @@ public void createGpuJobN1Test() -> accelerator.getType().contains(gpuType) && accelerator.getCount() == count))); } + @Ignore("Canceling jobs not yet GA") @Test public void createLocalSsdJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -217,6 +223,7 @@ public void createLocalSsdJobTest() -> 
attachedDisk.getDeviceName().contains(LOCAL_SSD_NAME)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createPersistentDiskJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -243,6 +250,7 @@ public void createPersistentDiskJobTest() -> attachedDisk.getDeviceName().contains(NEW_PERSISTENT_DISK_NAME)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchNotificationTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -263,6 +271,7 @@ public void createBatchNotificationTest() && jobNotification.getMessage().getNewTaskState() == State.FAILED)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomEventTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -284,6 +293,7 @@ public void createBatchCustomEventTest() .anyMatch(runnable -> runnable.getDisplayName().equals(displayName)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createScriptJobWithNfsTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -303,6 +313,7 @@ public void createScriptJobWithNfsTest() .anyMatch(volume -> volume.getNfs().getServer().equals(NFS_IP_ADDRESS)))); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchLabelJobTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -324,6 +335,7 @@ public void createBatchLabelJobTest() Assert.assertTrue(job.getLabelsMap().containsValue(labelValue2)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchCustomNetworkTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { @@ -346,6 +358,7 @@ public void createBatchCustomNetworkTest() .anyMatch(AllocationPolicy.NetworkInterface::getNoExternalIpAddress)); } + @Ignore("Canceling jobs not yet GA") @Test public void createJobWithAllocationPolicyLabelTest() throws IOException, 
ExecutionException, InterruptedException, TimeoutException { @@ -368,6 +381,7 @@ public void createJobWithAllocationPolicyLabelTest() Assert.assertTrue(job.getAllocationPolicy().getLabelsMap().containsValue(labelValue2)); } + @Ignore("Canceling jobs not yet GA") @Test public void createBatchRunnableLabelTest() throws IOException, ExecutionException, InterruptedException, TimeoutException { diff --git a/batch/snippets/src/test/java/com/example/batch/Util.java b/batch/snippets/src/test/java/com/example/batch/Util.java index eb4342ac572..5a6635ff71b 100644 --- a/batch/snippets/src/test/java/com/example/batch/Util.java +++ b/batch/snippets/src/test/java/com/example/batch/Util.java @@ -109,7 +109,7 @@ public static void waitForJobCompletion(Job job) String[] jobName = job.getName().split("/"); Instant startTime = Instant.now(); while (WAIT_STATES.contains(job.getStatus().getState())) { - if (Instant.now().getEpochSecond() - startTime.getEpochSecond() > 900) { + if (Instant.now().getEpochSecond() - startTime.getEpochSecond() > 1200) { throw new Error("Timed out waiting for operation to complete."); } job = getJob(jobName[1], jobName[3], jobName[5]); From 16b0dff2df7142b62d3a1d9c4dcc1d179a0c2de6 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 13 Dec 2024 03:47:59 +0100 Subject: [PATCH 63/66] chore(config): migrate config .github/renovate.json5 (#9616) --- .github/renovate.json5 | 190 ++++++++++++++++++++--------------------- 1 file changed, 92 insertions(+), 98 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 5a9bf7c5360..85e6da61771 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,142 +1,136 @@ -// find legacy configuration at https://github.com/GoogleCloudPlatform/java-docs-samples/blob/91792d4da53a12f96032f4556815f7d91f27257b/renovate.json { - "extends": [ - "config:recommended", - ":approveMajorUpdates", - "schedule:earlyMondays", - ":ignoreUnstable", + extends: [ + 'config:recommended', + 
':approveMajorUpdates', + 'schedule:earlyMondays', + ':ignoreUnstable', ], - "labels": [ - "dependencies", - "automerge" + labels: [ + 'dependencies', + 'automerge', ], - "minimumReleaseAge": "7 days", - "dependencyDashboardLabels": [ - "type: process", + minimumReleaseAge: '7 days', + dependencyDashboardLabels: [ + 'type: process', ], - // discontinue upgrades for java8 code samples - "ignorePaths": ["**/*java8*/**", "**/*java-8*/**"], - "packageRules": [ + ignorePaths: [ + '**/*java8*/**', + '**/*java-8*/**', + ], + packageRules: [ { - "matchCategories": [ - "java" + matchCategories: [ + 'java', + ], + addLabels: [ + 'lang: java', ], - "addLabels": [ - "lang: java" - ] }, - // TODO: check if auto-merge rules will work at all { - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', ], - "automerge": true + automerge: true, }, { - "matchDepTypes": [ - "devDependencies" + matchDepTypes: [ + 'devDependencies', ], - "automerge": true + automerge: true, }, - // group all Dockerfile dependencies { - "matchCategories": [ - "docker" - ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" - ], - "groupName": "docker", - "pinDigests": true, - "automerge": true + matchCategories: [ + 'docker', + ], + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', + ], + groupName: 'docker', + pinDigests: true, + automerge: true, }, - // group all terraform dependencies for google providers { - "matchCategories": [ - "terraform" + matchCategories: [ + 'terraform', + ], + matchDepTypes: [ + 'provider', + 'required_provider', ], - "matchDepTypes": [ - "provider", - "required_provider" + groupName: 'Terraform Google providers', + matchPackageNames: [ + '/^google/', ], - "matchPackagePatterns": "^google", - "groupName": "Terraform Google providers", }, - // *** Java dependency rules: - // group *ALL* Java dependencies { - 
"matchCategories": [ - "java" + matchCategories: [ + 'java', ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest", - "lockFileMaintenance" + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + 'lockFileMaintenance', ], - "groupName": "java", - "automerge": true + groupName: 'java', + automerge: true, }, - // do not allow Spring Boot 3 upgrades yet { - "matchCategories": [ - "java" + matchCategories: [ + 'java', ], - "matchPackagePatterns": [ - "org.springframework.boot" + matchCurrentVersion: '>=2.0.0, <3.0.0', + allowedVersions: '<3', + groupName: 'Spring Boot upgrades for v2', + description: '@akitsch: Spring Boot V3 requires Java 17', + matchPackageNames: [ + '/org.springframework.boot/', ], - "matchCurrentVersion": ">=2.0.0, <3.0.0", - "allowedVersions": "<3", - "groupName": "Spring Boot upgrades for v2", - "description": "@akitsch: Spring Boot V3 requires Java 17" }, - // limit micronaut upgrades for versions <= 4 { - "matchPackagePatterns": [ - "^io.micronaut" + groupName: 'Micronaut packages', + allowedVersions: '<4', + matchFileNames: [ + 'appengine-java11/**', + 'flexible/java-11/**', ], - "groupName": "Micronaut packages", - "allowedVersions": "<4", - "matchPaths": [ - "appengine-java11/**", - "flexible/java-11/**" + description: '@akitsch: Micronaut V4 requires Java 17', + matchPackageNames: [ + '/^io.micronaut/', ], - "description": "@akitsch: Micronaut V4 requires Java 17" }, - // disable Scala dependency upgrades { - "matchPackagePatterns": [ - "scala" + enabled: false, + matchPackageNames: [ + '/scala/', ], - "enabled": false }, { - "matchPackagePatterns": [ - "^jackson-module-scala" + enabled: false, + matchPackageNames: [ + '/^jackson-module-scala/', ], - "enabled": false }, - // disable SQL Spark dependency upgrades { - "matchPackagePatterns": [ - "^spark-sql" + enabled: false, + matchPackageNames: [ + '/^spark-sql/', ], - "enabled": false }, {}, ], - "rebaseWhen": "behind-base-branch", - "semanticCommits": "enabled", - 
"vulnerabilityAlerts": { - "labels": [ - "type:security" + rebaseWhen: 'behind-base-branch', + semanticCommits: 'enabled', + vulnerabilityAlerts: { + labels: [ + 'type:security', ], - "minimumReleaseAge": null + minimumReleaseAge: null, }, -} \ No newline at end of file +} From b6ba754ee230b6a12a60236ad07d57b24c45a248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=A2=D0=B5=D1=82=D1=8F=D0=BD=D0=B0=20=D0=AF=D0=B3=D0=BE?= =?UTF-8?q?=D0=B4=D1=81=D1=8C=D0=BA=D0=B0?= <49729677+TetyanaYahodska@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:07:46 +0100 Subject: [PATCH 64/66] feat(compute): add compute consistency group disks list (#9687) * Implemented compute_consistency_group_create and compute_consistency_group_delete samples, created test * Implemented compute_consistency_group_add_disk sample * Implemented compute_consistency_group_add_disk sample, created test * Fixed naming * Implemented compute_consistency_group_list_disks sample, created test * Fixed code * Fixed code * Fixed code * Cleaned up reservations * Fixed import * Fixed code * Fixed zone * Fixed zone * Fixed test * Fixed code and test * Added check for zonal location and code * Deleted redundant code * Added compute_consistency_group_list_disks_zonal sample, created test * Fixed tests --- .../ListRegionalDisksInConsistencyGroup.java | 74 +++++++++++++++++++ .../ListZonalDisksInConsistencyGroup.java | 73 ++++++++++++++++++ .../compute/disks/ConsistencyGroupIT.java | 49 +++++++++++- .../test/java/compute/disks/HyperdiskIT.java | 10 +-- 4 files changed, 200 insertions(+), 6 deletions(-) create mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java create mode 100644 compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java 
b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java new file mode 100644 index 00000000000..36fe60cf2ad --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListRegionalDisksInConsistencyGroup.java @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_list_disks_regional] +import com.google.cloud.compute.v1.Disk; +import com.google.cloud.compute.v1.ListRegionDisksRequest; +import com.google.cloud.compute.v1.RegionDisksClient; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class ListRegionalDisksInConsistencyGroup { + public static void main(String[] args) + throws IOException, InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP_ID"; + // Region of the disk. + String disksLocation = "us-central1"; + // Region of the consistency group. 
+ String consistencyGroupLocation = "us-central1"; + + listRegionalDisksInConsistencyGroup( + project, consistencyGroupName, consistencyGroupLocation, disksLocation); + } + + // Lists disks in a consistency group. + public static List listRegionalDisksInConsistencyGroup(String project, + String consistencyGroupName, String consistencyGroupLocation, String disksLocation) + throws IOException { + String filter = String + .format("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + List disksList = new ArrayList<>(); + + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (RegionDisksClient disksClient = RegionDisksClient.create()) { + ListRegionDisksRequest request = + ListRegionDisksRequest.newBuilder() + .setProject(project) + .setRegion(disksLocation) + .build(); + + RegionDisksClient.ListPagedResponse response = disksClient.list(request); + for (Disk disk : response.iterateAll()) { + if (disk.getResourcePoliciesList().contains(filter)) { + disksList.add(disk); + } + } + } + System.out.println(disksList.size()); + return disksList; + } +} +// [END compute_consistency_group_list_disks_regional] \ No newline at end of file diff --git a/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java new file mode 100644 index 00000000000..2434802d860 --- /dev/null +++ b/compute/cloud-client/src/main/java/compute/disks/consistencygroup/ListZonalDisksInConsistencyGroup.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package compute.disks.consistencygroup; + +// [START compute_consistency_group_list_disks_zonal] +import com.google.cloud.compute.v1.Disk; +import com.google.cloud.compute.v1.DisksClient; +import com.google.cloud.compute.v1.ListDisksRequest; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class ListZonalDisksInConsistencyGroup { + public static void main(String[] args) + throws IOException, InterruptedException, ExecutionException { + // TODO(developer): Replace these variables before running the sample. + // Project ID or project number of the Cloud project you want to use. + String project = "YOUR_PROJECT_ID"; + // Name of the consistency group. + String consistencyGroupName = "CONSISTENCY_GROUP_ID"; + // Zone of the disk. + String disksLocation = "us-central1-a"; + // Region of the consistency group. + String consistencyGroupLocation = "us-central1"; + + listZonalDisksInConsistencyGroup( + project, consistencyGroupName, consistencyGroupLocation, disksLocation); + } + + // Lists disks in a consistency group. 
+ public static List listZonalDisksInConsistencyGroup(String project, + String consistencyGroupName, String consistencyGroupLocation, String disksLocation) + throws IOException { + String filter = String + .format("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/resourcePolicies/%s", + project, consistencyGroupLocation, consistencyGroupName); + List disksList = new ArrayList<>(); + // Initialize client that will be used to send requests. This client only needs to be created + // once, and can be reused for multiple requests. + try (DisksClient disksClient = DisksClient.create()) { + ListDisksRequest request = + ListDisksRequest.newBuilder() + .setProject(project) + .setZone(disksLocation) + .build(); + DisksClient.ListPagedResponse response = disksClient.list(request); + + for (Disk disk : response.iterateAll()) { + if (disk.getResourcePoliciesList().contains(filter)) { + disksList.add(disk); + } + } + } + System.out.println(disksList.size()); + return disksList; + } +} +// [END compute_consistency_group_list_disks_zonal] diff --git a/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java index 6e901460ce0..61d4ec81940 100644 --- a/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java +++ b/compute/cloud-client/src/test/java/compute/disks/ConsistencyGroupIT.java @@ -27,7 +27,10 @@ import com.google.api.gax.longrunning.OperationFuture; import com.google.cloud.compute.v1.AddResourcePoliciesRegionDiskRequest; +import com.google.cloud.compute.v1.DisksClient; import com.google.cloud.compute.v1.InsertResourcePolicyRequest; +import com.google.cloud.compute.v1.ListDisksRequest; +import com.google.cloud.compute.v1.ListRegionDisksRequest; import com.google.cloud.compute.v1.Operation; import com.google.cloud.compute.v1.RegionDisksClient; import com.google.cloud.compute.v1.RemoveResourcePoliciesRegionDiskRequest; @@ -35,6 +38,8 @@ import 
compute.disks.consistencygroup.AddDiskToConsistencyGroup; import compute.disks.consistencygroup.CreateConsistencyGroup; import compute.disks.consistencygroup.DeleteConsistencyGroup; +import compute.disks.consistencygroup.ListRegionalDisksInConsistencyGroup; +import compute.disks.consistencygroup.ListZonalDisksInConsistencyGroup; import compute.disks.consistencygroup.RemoveDiskFromConsistencyGroup; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.Test; @@ -145,4 +150,46 @@ public void testDeleteConsistencyGroup() throws Exception { assertEquals(Operation.Status.DONE, status); } } -} + + @Test + public void testListRegionalDisksInConsistencyGroup() throws Exception { + try (MockedStatic mockedRegionDisksClient = + mockStatic(RegionDisksClient.class)) { + RegionDisksClient mockClient = mock(RegionDisksClient.class); + RegionDisksClient.ListPagedResponse mockResponse = + mock(RegionDisksClient.ListPagedResponse.class); + + mockedRegionDisksClient.when(RegionDisksClient::create).thenReturn(mockClient); + when(mockClient.list(any(ListRegionDisksRequest.class))) + .thenReturn(mockResponse); + + ListRegionalDisksInConsistencyGroup.listRegionalDisksInConsistencyGroup( + PROJECT_ID, CONSISTENCY_GROUP_NAME, REGION, REGION); + + verify(mockClient, times(1)) + .list(any(ListRegionDisksRequest.class)); + verify(mockResponse, times(1)).iterateAll(); + } + } + + @Test + public void testListZonalDisksInConsistencyGroup() throws Exception { + try (MockedStatic mockedRegionDisksClient = + mockStatic(DisksClient.class)) { + DisksClient mockClient = mock(DisksClient.class); + DisksClient.ListPagedResponse mockResponse = + mock(DisksClient.ListPagedResponse.class); + + mockedRegionDisksClient.when(DisksClient::create).thenReturn(mockClient); + when(mockClient.list(any(ListDisksRequest.class))) + .thenReturn(mockResponse); + + ListZonalDisksInConsistencyGroup.listZonalDisksInConsistencyGroup( + PROJECT_ID, CONSISTENCY_GROUP_NAME, REGION, REGION); + + verify(mockClient, 
times(1)) + .list(any(ListDisksRequest.class)); + verify(mockResponse, times(1)).iterateAll(); + } + } +} \ No newline at end of file diff --git a/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java index c30de733a53..d8fac41a64b 100644 --- a/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java +++ b/compute/cloud-client/src/test/java/compute/disks/HyperdiskIT.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.RETURNS_DEEP_STUBS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.times; @@ -41,7 +42,6 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.MockedStatic; -import org.mockito.Mockito; @RunWith(JUnit4.class) @Timeout(value = 5, unit = TimeUnit.MINUTES) @@ -71,8 +71,8 @@ public void testCreateHyperdiskStoragePool() throws Exception { mockStatic(StoragePoolsClient.class)) { StoragePoolsClient mockClient = mock(StoragePoolsClient.class); OperationFuture mockFuture = - mock(OperationFuture.class, Mockito.RETURNS_DEEP_STUBS); - Operation operation = mock(Operation.class, Mockito.RETURNS_DEEP_STUBS); + mock(OperationFuture.class, RETURNS_DEEP_STUBS); + Operation operation = mock(Operation.class, RETURNS_DEEP_STUBS); mockedStoragePoolsClient.when(StoragePoolsClient::create).thenReturn(mockClient); when(mockClient.insertAsync(any(InsertStoragePoolRequest.class))) @@ -110,8 +110,8 @@ public void testCreateDiskInStoragePool() throws Exception { try (MockedStatic mockedDisksClient = mockStatic(DisksClient.class)) { DisksClient mockClient = mock(DisksClient.class); OperationFuture mockFuture = - mock(OperationFuture.class, Mockito.RETURNS_DEEP_STUBS); - Operation operation = mock(Operation.class, 
Mockito.RETURNS_DEEP_STUBS); + mock(OperationFuture.class, RETURNS_DEEP_STUBS); + Operation operation = mock(Operation.class, RETURNS_DEEP_STUBS); mockedDisksClient.when(DisksClient::create).thenReturn(mockClient); when(mockClient.insertAsync(any(InsertDiskRequest.class))).thenReturn(mockFuture); From f4dd7c9a668f75f0d500637986a281407cc28115 Mon Sep 17 00:00:00 2001 From: "eapl.me" <64097272+eapl-gemugami@users.noreply.github.com> Date: Fri, 13 Dec 2024 10:55:24 -0600 Subject: [PATCH 65/66] chore(gae): delete sample gae_java8_datastore_interface_2 (#9812) --- .../com/example/appengine/QueriesTest.java | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java b/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java index c242a669d41..6c36df45fa8 100644 --- a/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java +++ b/appengine-java8/datastore/src/test/java/com/example/appengine/QueriesTest.java @@ -430,32 +430,6 @@ public void queryInterface_multipleFilters_printsMatchedEntities() throws Except assertThat(buf.toString()).doesNotContain("Charlie"); } - @Test - public void queryInterface_singleFilter_returnsMatchedEntities() throws Exception { - // Arrange - Entity a = new Entity("Person", "a"); - a.setProperty("height", 100); - Entity b = new Entity("Person", "b"); - b.setProperty("height", 150); - Entity c = new Entity("Person", "c"); - c.setProperty("height", 300); - datastore.put(ImmutableList.of(a, b, c)); - - // Act - long minHeight = 150; - // [START gae_java8_datastore_interface_2] - Filter heightMinFilter = - new FilterPredicate("height", FilterOperator.GREATER_THAN_OR_EQUAL, minHeight); - - Query q = new Query("Person").setFilter(heightMinFilter); - // [END gae_java8_datastore_interface_2] - - // Assert - List results = - datastore.prepare(q.setKeysOnly()).asList(FetchOptions.Builder.withDefaults()); - 
assertWithMessage("query results").that(results).containsExactly(b, c); - } - @Test public void queryInterface_orFilter_printsMatchedEntities() throws Exception { // Arrange From 17d5f90cc6d4663d93ded1c0891b0b3b6196fa0e Mon Sep 17 00:00:00 2001 From: Jack Wotherspoon Date: Mon, 16 Dec 2024 17:31:48 -0500 Subject: [PATCH 66/66] chore: Update blunderbuss.yml (#9858) --- .github/blunderbuss.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index f9bbd1cfac6..d2733b96f92 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -38,7 +38,7 @@ assign_issues_by: - labels: - 'api: cloudsql' to: - - GoogleCloudPlatform/infra-db-sdk + - GoogleCloudPlatform/cloud-sql-connectors - labels: - 'api: spanner' to: @@ -115,7 +115,7 @@ assign_prs_by: - labels: - 'api: cloudsql' to: - - GoogleCloudPlatform/infra-db-sdk + - GoogleCloudPlatform/cloud-sql-connectors - labels: - 'api: spanner' to: