import { typescript } from 'projen';
import { NodePackageManager } from 'projen/lib/javascript';
import { READMEComponent } from './projenrc/projects/core-readme-component';
import { DotNetQuickStartPOCs } from './projenrc/projects/dot-net-quickstart-pocs';
import { POCReadmeDetails } from './projenrc/projects/resources/types';
import { StreamlitQuickStartPOC } from './projenrc/projects/streamlit-quickstart-poc';
/**
* Base project for repo
*/
const project = new typescript.TypeScriptProject({
authorName: 'AWS',
jest: false,
packageManager: NodePackageManager.NPM,
defaultReleaseBranch: 'main',
name: 'genai-quickstart-pocs',
projenrcTs: true,
github: false,
deps: ['nunjucks'],
devDeps: ['@types/nunjucks'],
gitignore: [
'**/.DS_Store',
'**/__pycache__/',
'.env/',
'venv',
'output/',
'genai-quickstart-pocs-python/amazon-bedrock-alt-text-generator/files/',
'genai-quickstart-pocs-python/amazon-bedrock-image-guardrails-poc/generated-images/',
'temp/',
'cdk.out',
],
tsconfig: {
compilerOptions: {
rootDir: '.',
sourceRoot: '.',
},
include: [
'genai-quickstart-pocs-python/**/*',
'projenrc/**/*',
],
},
});
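/**
 * Note: projen synthesizes the managed repo files (package.json, tsconfig.json,
 * .gitignore, etc.) from this configuration. After editing this file, run
 * `npx projen` to regenerate those outputs rather than editing them directly.
 */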
const pythonPocs: Array<StreamlitQuickStartPOC> = [];
/**
* Python POCs
*/
pythonPocs.push(
new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Alt Text Generator',
pocPackageName: 'amazon-bedrock-alt-text-generator',
additionalDeps: [
'langchain@^0.2',
'langchain-community@^0.2',
'langchain-aws',
'pypdf',
'pillow',
'pymupdf',
'reportlab',
],
pocDescription:
'This POC demonstrates how to use the Amazon Bedrock Alt Text Generator to generate alt text for images in PDF documents.',
readme: {
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'pdf_image_alt_text_generator/generator.py',
description:
'This is the logic that extracts the data from the PDF and calls the Bedrock model for inference',
},
{
name: 'pdf_image_alt_text_generator/download_results.py',
description:
'generates a PDF with all images and their alt text results, as well as input/output token usage, calculated in a table.',
},
],
},
},
}),
);
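/**
 * The pushes below all follow the same constructor shape. As a reading aid,
 * here is a minimal sketch of the options StreamlitQuickStartPOC appears to
 * accept, inferred from the call sites in this file; the authoritative
 * definition lives in ./projenrc/projects/streamlit-quickstart-poc:
 *
 *   interface StreamlitQuickStartPOCOptions {
 *     parentProject: typescript.TypeScriptProject;
 *     pocName: string;            // display name for the POC
 *     pocPackageName: string;     // package/directory name for the POC
 *     additionalDeps?: string[];  // extra Python dependencies to install
 *     pocDescription?: string;    // summary used in the generated README
 *     readme?: POCReadmeDetails;  // goal, file walkthrough, extra steps
 *   }
 */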
pythonPocs.push(
new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Amazon Athena POC',
pocPackageName: 'amazon-bedrock-amazon-athena-poc',
readme: {
additionalPrerequisits: [
'Access to Amazon Athena and the ability to create an Amazon Athena database and tables.',
],
pocGoal: {
overview:
'The goal of this POC is to provide users with the ability to use Amazon Bedrock and generative AI to take natural language questions and transform them into relational database queries against Amazon Athena.\n' +
'The POC comes with a basic frontend to help users stand up a proof-of-concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user makes a request, asking a natural language question based on the database available in Amazon Athena to the GenAI app (app.py).',
'This natural language question is passed into Amazon Bedrock, which takes the natural language question and creates a SQL query (amazon_athena_bedrock_query.py).',
'The created SQL query is then executed against your Amazon Athena database to begin retrieving the data (amazon_athena_bedrock_query.py).',
'The data is retrieved from your Amazon Athena Database and is passed back into Amazon Bedrock, to generate a natural language answer based on the retrieved data (amazon_athena_bedrock_query.py).',
'The LLM returns a natural language response to the user through the streamlit frontend based on the retrieved data (app.py).',
],
},
fileWalkthrough: {
files: [
{
name: 'amazon_athena_bedrock_query.py',
description:
'contains connectors into your Amazon Athena database and the interaction with Amazon Bedrock',
},
{
name: 'moma_examples.yaml',
description:
'contains several sample prompts that will be used to implement a few-shot prompting technique.',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root folder of this POC. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
\taws_access_key_id=<AWS_ACCESS_KEY_ID>
\taws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
\tregion_name=<AWS_REGION>
\tdatabase_name=<ATHENA_DATABASE_NAME>
\ts3_staging_dir=<S3_STAGING_DIRECTORY_BUCKET_PATH> example -> s3://sample-bucket/`,
},
{
instructions: `If you would like to use this repo with the sample data, you will need to upload the two sample data files found in the sample data directory as two individual tables to your Amazon Athena Database.
If you prefer to use your own database/tables in your Amazon Athena database, I would highly recommend reviewing the moma_examples.yaml file in the SampleData directory to see how prompts are constructed for this sample application and spending the time creating 5 - 10 prompts that resemble your dataset more closely.`,
},
],
},
additionalDeps: [
'python-dotenv',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to use natural language questions to query relational data stores, specifically Amazon Athena. This example leverages the MOMA Open Source Database: https://github.com/MuseumofModernArt/collection.',
}));
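/**
 * Convention for extraSteps commands: multi-line commands are written as
 * template literals (with embedded newlines and \t indentation) so that,
 * presumably, each variable lands on its own line when the step is rendered
 * into the POC's generated README; string fragments concatenated without
 * '\n' would collapse into a single line.
 */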
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock & Amazon RDS POC',
pocPackageName: 'amazon-bedrock-amazon-rds-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to use natural language questions to query relational data stores, specifically Amazon RDS. This example leverages the MOMA Open Source Database: https://github.com/MuseumofModernArt/collection.',
readme: {
pocGoal: {
overview:
'The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to take natural language questions, and transform them into relational database queries against Amazon RDS Databases. This repo is designed to work with\n' +
'Amazon RDS Postgres, but can be configured to work with other database engine types.\n' +
'This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user makes a request, asking a natural language question based on the data in Amazon RDS to the GenAI app (app.py).',
'This natural language question is passed into Amazon Bedrock, which takes the natural language question and creates a SQL query (amazonRDS_bedrock_query.py).',
'The created SQL query is then executed against your Amazon RDS database to begin retrieving the data (amazonRDS_bedrock_query.py).',
'The data is retrieved from your Amazon RDS Database and is passed back into Amazon Bedrock, to generate a natural language answer based on the retrieved data (amazonRDS_bedrock_query.py).',
'The LLM returns a natural language response to the user through the streamlit frontend based on the retrieved data (app.py).',
],
},
additionalPrerequisits: [
'Access to Amazon RDS and the ability to create an Amazon RDS database and tables.',
'Please note that this project leverages the [langchain-experimental](https://pypi.org/project/langchain-experimental/) package which has known vulnerabilities.',
],
fileWalkthrough: {
files: [
{
name: 'amazonRDS_bedrock_query.py',
description:
'contains connectors into your Amazon RDS database and the interaction with Amazon Bedrock',
},
{
name: 'moma_examples.yaml',
description:
'contains several sample prompts that will be used to implement a few-shot prompting technique.',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root folder of this POC. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<aws_cli_profile_name>
\trds_username=<rds_database_username>
\trds_password=<rds_database_password>
\trds_endpoint=<rds_database_endpoint>
\trds_port=<rds_port>
\trds_db_name=<rds_database_name>`,
},
{
instructions: `If you would like to use this repo with the sample data, you will need to upload the two sample data files found in the sample data directory as two individual tables to your Amazon RDS Postgres Database.
If you prefer to use your own database/tables in your Amazon RDS instance, I would highly recommend reviewing the moma_examples.yaml file in the SampleData directory to see how prompts are constructed for this sample application and spending the time creating 5 - 10 prompts that resemble your dataset more closely.`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock & Amazon Redshift POC',
pocPackageName: 'amazon-bedrock-amazon-redshift-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
pocDescription:
"This is sample code demonstrating the use of Amazon Bedrock and Generative AI to use natural language questions to query relational data stores, specifically Amazon Redshift. This example leverages the MOMA Open Source Database: https://github.com/MuseumofModernArt/collection.\n\n \t**Please Note: If you don't want to build this from scratch, Amazon Redshift now supports GenAI capabilities natively, more information on that can be found [here](https://aws.amazon.com/blogs/aws/amazon-redshift-adds-new-ai-capabilities-to-boost-efficiency-and-productivity/).**",
readme: {
pocGoal: {
architectureImage: true,
overview:
'The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to take natural language questions, and transform them into relational database queries against Amazon Redshift Databases. This repo is designed to work with Amazon Redshift Provisioned Clusters. This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
flowSteps: [
'The user makes a request, asking a natural language question based on the data in Amazon Redshift to the GenAI app (`app.py`)',
'This natural language question is passed into Amazon Bedrock, which takes the natural language question and creates a SQL query (`amazon_redshift_bedrock_query.py`)',
'The created SQL query is then executed against your Amazon Redshift cluster to begin retrieving the data (`amazon_redshift_bedrock_query.py`).',
'The data is retrieved from your Amazon Redshift Database and is passed back into Amazon Bedrock, to generate a natural language answer based on the retrieved data (`amazon_redshift_bedrock_query.py`).',
'The LLM returns a natural language response to the user through the streamlit frontend based on the retrieved data (`app.py`).',
],
},
additionalPrerequisits: [
'Access to Amazon Redshift and the ability to create an Amazon Redshift cluster and tables.',
'Please note that this project leverages the [langchain-experimental](https://pypi.org/project/langchain-experimental/) package which has known vulnerabilities.',
],
fileWalkthrough: {
files: [
{
name: 'amazon_redshift_bedrock_query.py',
description:
'contains connectors into your Amazon Redshift database and the interaction with Amazon Bedrock',
},
{
name: 'moma_examples.yaml',
description:
'contains several sample prompts that will be used to implement a few-shot prompting technique.',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root folder of this POC. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<aws_cli_profile_name>
\tredshift_host=<REDSHIFT_HOST_URL> example -> redshift-cluster-1.abcdefghijk123.us-east-1.redshift.amazonaws.com
\tredshift_username=<redshift_database_username>
\tredshift_password=<redshift_database_password>
\tredshift_endpoint=<redshift_database_endpoint>
\tredshift_port=<redshift_port>
\tredshift_db_name=<redshift_database_name>`,
},
{
instructions:
'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure lines 19-25 in the amazon_redshift_bedrock_query.py file:',
command: `llm = Bedrock(
credentials_profile_name=os.getenv("profile_name"),
model_id="amazon.titan-text-express-v1",
endpoint_url="https://bedrock-runtime.us-east-1.amazonaws.com",
region_name="us-east-1",
verbose=True
)`,
},
{
instructions: `If you would like to use this repo with the sample data, you will need to upload the two sample data files found in the sample data directory as two individual tables to your Amazon Redshift Database.
If you prefer to use your own database/tables in your Amazon Redshift instance, I would highly recommend reviewing the moma_examples.yaml file in the SampleData directory to see how prompts are constructed for this sample application and spending the time creating 5 - 10 prompts that resemble your dataset more closely.`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Asynchronous Invocation POC',
pocPackageName: 'amazon-bedrock-asynchronous-invocation-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to perform asynchronous invocations of large language models. The application is constructed with a simple streamlit frontend where users can input zero shot requests directly against the LLM of their choice, leveraging asynchronous invocations, to invoke 3 models simultaneously to reduce overall latency.',
readme: {
pocGoal: {
overview:
'The goal of this repo is to provide users the ability to use Amazon Bedrock and Generative AI to perform asynchronous invocations of large language models. The application is constructed with a simple streamlit frontend where users can input zero shot requests directly against the LLM of their choice, leveraging asynchronous invocations, to invoke 3 models simultaneously to reduce overall latency.',
architectureImage: true,
flowSteps: [
'The user makes a zero-shot request, asking a natural language question through the streamlit frontend (app.py).',
'The question is passed to the asynchronous orchestrator, which invokes three Amazon Bedrock large language models simultaneously (asynchronous_invocation.py).',
'The responses from all three models are gathered asynchronously and returned to the user through the streamlit frontend, reducing overall latency (app.py).',
],
},
fileWalkthrough: {
includeDefaults: false,
files: [
{
name: 'app.py',
description:
'contains the streamlit frontend and the interaction with Amazon Bedrock',
},
{
name: 'amazon_bedrock_query.py',
description:
'contains connectors into your Amazon Bedrock LLMs and the interaction logic',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root folder of this POC. Within the .env file you just created you will need to configure the .env to contain:',
command:
'profile_name=<aws_cli_profile_name>\n' + 'region_name=us-east-1',
},
{
instructions:
'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 86 in the asynchronous_invocation.py file. Currently, this application is only suited to use Anthropic models:',
command: `async def orchestrator(question, modelID1="anthropic.claude-3-sonnet-20240229-v1:0", modelID2="anthropic.claude-3-haiku-20240307-v1:0", modelID3='anthropic.claude-v2:1'):
result = await asyncio.gather(main(question, modelID1), main(question, modelID2), main(question, modelID3))
print(result)
return result`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Chat POC',
pocPackageName: 'amazon-bedrock-chat-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
readme: {
pocGoal: {
architectureImage: true,
overview:
'The goal of this repo is to provide users the ability to use Amazon Bedrock in a similar fashion to ChatGPT. This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
flowSteps: [
'The user makes a "zero-shot" request to the streamlit frontend (`app.py`)',
'The application returns the 3 most semantically similar prompts, and creates a final prompt that contains the 3 returned prompts along with the user\'s query (few-shot prompting) (`prompt_finder_and_invoke_llm.py`).',
'The final prompt is passed into Amazon Bedrock to generate an answer to the user\'s question (`prompt_finder_and_invoke_llm.py`).',
'The final answer is generated by Amazon Bedrock and displayed on the frontend application (`app.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'prompt_finder_and_invoke_llm.py',
description:
'houses the logic of the application, including the semantic search against the prompt repository and prompt formatting logic and the Amazon Bedrock API invocations.',
},
{
name: 'chat_history_prompt_generator.py',
description:
'houses the logic required to preserve session state and to dynamically inject the conversation history into prompts to allow for follow-up questions and conversation summary.',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<AWS_CLI_PROFILE_NAME>',
},
{
instructions:
'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 23 in the prompt_finder_and_invoke_llm.py file to set the appropriate region:',
command:
"bedrock = boto3.client('bedrock-runtime', 'us-east-1', endpoint_url='https://bedrock-runtime.us-east-1.amazonaws.com')",
},
{
instructions:
'Since this repository is configured to leverage Claude 3, the prompt payload is structured in a different format. If you wanted to leverage other Amazon Bedrock models you can update the llm_answer_generator() function in prompt_finder_and_invoke_llm.py to look like:',
command: `def llm_answer_generator(question_with_prompt):
"""
This function is used to invoke Amazon Bedrock using the finalized prompt that was created by the prompt_finder(question)
function.
:param question_with_prompt: This is the finalized prompt that includes semantically similar prompts, chat history,
and the user's question all in a proper multi-shot format.
:return: The final answer to the user's question.
"""
# body of data with parameters that is passed into the bedrock invoke model request
# TODO: TUNE THESE PARAMETERS AS YOU SEE FIT
body = json.dumps({"prompt": question_with_prompt,
"max_tokens_to_sample": 8191,
"temperature": 0,
"top_k": 250,
"top_p": 0.5,
"stop_sequences": []
})
# configure model specifics such as specific model
modelId = 'anthropic.claude-v2'
accept = 'application/json'
contentType = 'application/json'
# Invoking the bedrock model with your specifications
response = bedrock.invoke_model(body=body,
modelId=modelId,
accept=accept,
contentType=contentType)
# the body of the response that was generated
response_body = json.loads(response.get('body').read())
# retrieving the specific completion field, where your answer will be
answer = response_body.get('completion')
# returning the answer as a final result, which ultimately gets returned to the end user
return answer`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Claude 3 Image Analysis POC',
pocPackageName: 'amazon-bedrock-claude3-image-analysis-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Multi-Modal Generative AI models from Anthropic to implement an image analysis use case. The application is constructed with a simple streamlit frontend where users can upload a 1 page jpeg, png or PDF and get a description of the image.',
readme: {
pocGoal: {
architectureImage: true,
overview:
'The goal of this repo is to provide users with the ability to analyze images with Generative AI. This can be integrated into applications like image classification, reverse image lookup, object detection and more. This repo comes with a basic streamlit front-end to help users stand up a proof of concept and experiment with image analysis use-cases quickly.',
flowSteps: [
'The user uploads an image for the Bedrock model to analyze (`app.py`).',
'The streamlit app takes the image input and invokes Amazon Bedrock to generate a description (`analyze_images.py`).',
'The description generated by Amazon Bedrock is returned and displayed on the streamlit app (`app.py`).',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'analyze_images.py',
description:
'houses the logic of the application, including the semantic search against the prompt repository and prompt formatting logic and the Amazon Bedrock API invocations.',
},
],
},
extraSteps: [
{
instructions:
'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<AWS_CLI_PROFILE_NAME>',
},
{
instructions:
'Depending on the region and model that you are planning to use with Amazon Bedrock (please note that only a few models can analyze images), you may need to reconfigure model parameters in the image_analysis file. You might also choose to customize your prompts if this POC is for an industry-specific use-case analyzing a specific type of image:',
command: `brclient = boto3.client('bedrock-runtime', 'us-east-1', endpoint_url='https://bedrock-runtime.us-east-1.amazonaws.com',config=config)
#model params
model_id = "anthropic.claude-3-sonnet-20240229-v1:0"`,
}, {
instructions: 'You may also choose to customize the system prompt to align with a specific use-case, or to get specific responses back about your images.',
command: 'system_prompt = "You are an expert in image analysis and classification. The question will be contained within the <question></question> tags. Before answering, think step by step in <thinking> tags as you analyze every part of the image. Provide your answer within the <answer></answer> tags. Include a JSON structured response describing image attributes contained within the <json></json> tags. Always add line breaks between each section of your response"',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Claude 3 Long Form Output POC',
pocPackageName: 'amazon-bedrock-claude3-long-form-output-poc',
additionalDeps: ['botocore', 'pypdf'],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to generate long form content. The application is constructed with a simple streamlit frontend where users can input a prompt and have the generated output analyzed for accuracy and fluency.',
readme: {
pocGoal: {
overview: 'The goal of this repository is to provide users with the ability to use Amazon Bedrock to generate long form content. This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user either selects the default prompt or inputs a prompt.',
'The application constructs the appropriate prompt and sends it to Amazon Bedrock.',
'The application receives the generated text and sends it to Amazon Bedrock for analysis of accuracy and fluency.',
'The generated text and analysis are displayed on the frontend application.',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'BedrockProcessor.py',
description: 'The logic for interacting with the Amazon Bedrock service.',
},
],
},
extraSteps: [
{ instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:', command: 'profile_name=<AWS_CLI_PROFILE_NAME>' },
{ instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 23 in the prompt_finder_and_invoke_llm.py file to set the appropriate region:', command: 'bedrock = boto3.client(\'bedrock-runtime\', \'us-east-1\', endpoint_url=\'https://bedrock-runtime.us-east-1.amazonaws.com\')' },
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Claude 3 Multi-Modal POC',
pocPackageName: 'amazon-bedrock-claude3-multi-modal-poc',
additionalDeps: ['pillow'],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Anthropic Claude 3 to satisfy multi-modal use cases. The application is constructed with a simple streamlit frontend where users can input zero shot requests to satisfy a broad range of use cases, including image to text multi-modal style use cases.',
readme: {
pocGoal: {
overview: 'The goal of this repo is to provide users the ability to use Amazon Bedrock (specifically Claude 3) and generative AI to leverage its multi-modal capabilities, allowing users to insert text questions, images, or both to get a comprehensive description or answer based on the image and/or question that was passed in. \n This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user uploads an image file to the streamlit app, with or without a text question. (`app.py`)',
'The user inserts a text question into to the streamlit app, with or without an image. (`app.py`)',
'The streamlit app takes the image file and/or text and saves it. The image and/or text is passed into Amazon Bedrock (Anthropic Claude 3). (`llm_multi_modal_invoke.py`)',
'A natural language response is returned to the end user, either describing the image, answering a question about the image, or answering a question in general. (`app.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{ name: 'llm_multi_modal_invoke.py', description: 'Houses the logic of the application, including the image encoding and Amazon Bedrock API invocations' },
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
save_folder=<PATH_TO_ROOT_OF_THIS_REPO>`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Claude 3 Streaming Response POC',
pocPackageName: 'amazon-bedrock-claude3-streaming-response-poc',
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to implement streaming responses. The application is constructed with a simple streamlit frontend where users can input zero shot requests directly against the LLM of their choice, leveraging a streaming response technique.',
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Converse API POC',
pocPackageName: 'amazon-bedrock-converse-api-poc',
pocDescription:
'This is sample code demonstrating the use of the Amazon Bedrock Converse API to help with conversation oriented use cases that require context preservation. The application is constructed with a simple streamlit frontend where users can input zero shot requests to Claude 3, with the Amazon Bedrock Converse API in place to allow users to ask context aware questions.',
readme: {
pocGoal: {
overview: 'The goal of this repo is to provide users the ability to use the Amazon Bedrock Converse API to facilitate conversation oriented use cases that require context preservation. This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user inserts a text question into the streamlit app. (`app.py`)',
'The streamlit app takes the text and passes it into Amazon Bedrock. (`invoke_llm_with_streaming.py`)',
'A natural language response is streamed to the end user, answering a question in general. (`app.py`)',
],
},
fileWalkthrough: {
files: [
{
name: 'invoke_llm_with_streaming.py',
description: 'Houses the invocation of Amazon Bedrock with a streaming response, and the basic prompt formatting logic.',
},
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<AWS_CLI_PROFILE_NAME>',
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 15 in the invoke_llm_with_streaming.py file to set the region or line 51 to change to another Claude 3 model such as Haiku:',
command: `bedrock = boto3.client('bedrock-runtime', 'us-east-1', endpoint_url='https://bedrock-runtime.us-east-1.amazonaws.com')
response = bedrock.invoke_model_with_response_stream(body=json_prompt, modelId="anthropic.claude-3-sonnet-20240229-v1:0",
accept="application/json", contentType="application/json")`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Converse Stream API POC',
pocPackageName: 'amazon-bedrock-converse-stream-api-poc',
pocDescription:
'This is sample code demonstrating the use of the Amazon Bedrock ConverseStream API to help with conversation oriented use cases that require context preservation. The application is constructed with a simple streamlit frontend where users can input zero shot requests to Claude 3, with the Amazon Bedrock ConverseStream API in place to allow users to ask context aware questions and stream the response back.',
readme: {
pocGoal: {
overview: 'The goal of this repo is to provide users the ability to use the Amazon Bedrock ConverseStream API to demonstrate its ability to facilitate conversational GenAI use cases that require context awareness and streaming responses. \n This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user inserts a text question into the streamlit app. (`app.py`)',
'The streamlit app takes the text inserted by the user and passes it into an Amazon Bedrock model using the Converse API. The user\'s question is answered, and both the question and answer are stored. (`invoke_model_conversation_api.py`)',
'The answer to the user\'s question is returned to the front-end application, and allows users to ask follow up questions as the Converse API helps preserve context throughout the user\'s conversation (`app.py`)',
],
},
additionalPrerequisits: [
'Create an Amazon Bedrock Guardrail, information on how to do that can be found [here](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html)',
],
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'invoke_model_converse_stream_api.py',
description: 'Houses the logic of the application, including the Amazon Bedrock Converse API invocation.',
},
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<CLI_profile_name>',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock CSV Chatbot POC',
pocPackageName: 'amazon-bedrock-csv-chatbot-poc',
additionalDeps: ['pandas'],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to implement a chatbot that is able to converse with the user based on CSV data provided by the user. The application is constructed with a simple streamlit frontend where users can upload large CSV files and get them analyzed or start chatbot interactions.',
readme: {
pocGoal: {
overview: 'The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to answer questions a user might have on the CSV data provided. \n\nThis repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user uploads a CSV file to the streamlit app. (`app.py`)',
'The streamlit app takes the CSV file and chunks the document for efficient data processing (`csv_data_insights.py`)',
'Once the data is passed into Amazon Bedrock and the user asks the chatbot a question, it creates a response to the user\'s question (`csv_data_insights.py`).',
'After the response is generated, it is presented on the streamlit app (`app.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'csv_data_insights.py',
description: 'Houses the logic of the application and Amazon Bedrock API invocations.',
},
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
save_folder=<PATH_TO_ROOT_OF_THIS_REPO>`,
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 10 in the `csv_data_insights.py` file to set your region:',
command: 'bedrock = boto3.client(\'bedrock-runtime\', \'us-east-1\', endpoint_url=\'https://bedrock-runtime.us-east-1.amazonaws.com\')',
},
{
instructions: 'Since this repository is configured to leverage Claude 3, the prompt payload is structured in a different format. If you wanted to leverage other Amazon Bedrock models you can update line 143 in the `csv_data_insights.py` file to look like:',
command: 'response = bedrock.invoke_model(body=json_prompt, modelId="anthropic.claude-3-sonnet-20240229-v1:0", accept="application/json", contentType="application/json")',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Document Comparison POC',
pocPackageName: 'amazon-bedrock-document-comparison-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
'pypdf',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to implement a document comparison use case. The application is constructed with a simple streamlit frontend where users can upload 2 versions of a document and get all changes between documents listed.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to perform document comparison between two uploaded PDFs.
This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.`,
architectureImage: true,
flowSteps: [
'The user uploads two PDF files to the streamlit app. (`app.py`)',
'The streamlit app takes the two PDF documents, saves them, and formats them into a prompt with semantically similar examples (`doc_comparer.py`)',
'The finalized few shot prompt containing both uploaded documents is passed into Amazon Bedrock, which generates a list of all differences between the two uploaded documents and returns the final list to the front end (`doc_comparer.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [{
name: 'doc_comparer.py',
description: 'Houses the logic of the application, including the prompt formatting logic and Amazon Bedrock API invocations.',
}],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
save_folder=<PATH_TO_ROOT_OF_THIS_REPO>`,
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 20 in the doc_comparer.py file to set the appropriate region:',
command: 'bedrock = boto3.client(\'bedrock-runtime\', \'us-east-1\', endpoint_url=\'https://bedrock-runtime.us-east-1.amazonaws.com\')',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Document Generator POC',
pocPackageName: 'amazon-bedrock-document-generator-poc',
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to implement a document generation use case. The application is constructed with a simple streamlit frontend where users can provide details and create a document in the exact format that you specify.',
readme: {
pocGoal: {
overview: 'The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to perform document generation based on a document template and details inputted by the user.\n\nThis repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.',
architectureImage: true,
flowSteps: [
'The user inserts document details that they would like included in the generated document within the streamlit app. (`app.py`)',
'The streamlit app takes the document details and invokes Amazon Bedrock to generate a sample document that matches the document structure stated in the prompt containing the inserted document details (`doc_generator.py`)',
'Amazon Bedrock generates a first draft of the sample document and presents it to the frontend (`app.py`)',
'The user inserts refinement details highlighting areas where they would like to add refinements to the previously generated document (`app.py`)',
'The streamlit app takes the refinement details, passes them to Amazon Bedrock along with the document prompt and the original draft of the document to begin creating the refined version (`doc_generator.py`)',
'Amazon Bedrock is used to generate the refined version of the document based on the user defined refinements and presents it on the frontend (`app.py`)',
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<AWS_CLI_PROFILE_NAME>',
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 12 in the document_generator.py file to set the appropriate region:',
command: 'bedrock = boto3.client(\'bedrock-runtime\', \'us-east-1\', endpoint_url=\'https://bedrock-runtime.us-east-1.amazonaws.com\')',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock GenAI Dynamic Prompt Explained POC',
pocPackageName: 'amazon-bedrock-genai-dynamic-prompting-explained-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
],
pocDescription:
'This is sample code that can be used to provide a hands on explanation as to how Dynamic Prompting works in relation to Gen AI. The application is constructed with a simple streamlit frontend where users can ask questions against an Amazon Bedrock supported LLM and get a deeper understanding of how few-shot and dynamic prompting works.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock in a similar fashion to ChatGPT; in this case
the application has a strong focus on demonstrating how prompts are dynamically selected based on the user inputted question.
This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.`,
architectureImage: true,
flowSteps: [
'The user makes a "zero-shot" request to the streamlit frontend. (app.py`)',
'The application performs a semantic search of the user\'s query against the 1200+ prompts. (`prompt_finder_and_invoke_llm.py`)',
'The application returns the 3 most semantically similar prompts, and creates a final prompt that contains the 3 returned prompts along with the user\'s query (few-shot prompting) (`prompt_finder_and_invoke_llm.py`)',
'The final prompt is passed into Amazon Bedrock to generate an answer to the user\'s question (`prompt_finder_and_invoke_llm.py`)',
'The final answer is generated by Amazon Bedrock and displayed on the frontend application along with the 3 most semantically similar prompts (`app.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'prompt_finder_and_invoke_llm.py',
description: 'Houses the logic of the application, including the prompt formatting logic and Amazon Bedrock API invocations.',
},
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: 'profile_name=<AWS_CLI_PROFILE_NAME>',
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure line 12 in the prompt_finder_and_invoke_llm.py file to set the appropriate region:',
command: 'bedrock = boto3.client(\'bedrock-runtime\', \'us-east-1\', endpoint_url=\'https://bedrock-runtime.us-east-1.amazonaws.com\')',
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Guardrails POC',
pocPackageName: 'amazon-bedrock-guardrails-poc',
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock Guardrails to help prevent prompt-injection attacks and prevent unintended responses from the LLM. The application is constructed with a simple streamlit frontend where users can input zero shot requests to Claude 3, with Amazon Bedrock Guardrails in place to prevent malicious prompts and responses.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock Guardrails to demonstrate its ability to prevent malicious prompts and responses.
This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.`,
architectureImage: true,
flowSteps: [
'The user inserts a text question into the streamlit app. (`app.py`)',
'The streamlit app takes the text inserted by the user and passes it into an Amazon Bedrock Guardrail to check for prompt injection. If the prompt is detected as malicious or triggers the guardrail, a response will be returned to the end user saying the request is blocked (`invoke_model_with_guardrails.py`)',
'If the prompt does not trigger the guardrail it is passed into the bedrock model the user has specified (`invoke_model_with_guardrails.py`)',
'A response is returned by the Amazon Bedrock Model of choice and is passed into the Amazon Bedrock Guardrail (`invoke_model_with_guardrails.py`). If the response is detected as malicious or triggers the guardrail, a response will be returned to the end user saying the request is blocked (`invoke_model_with_guardrails.py`)',
'If the response does not trigger a guardrail, a natural language response is returned to the end user answering the initial text question inserted by the end user (`app.py`)',
],
},
fileWalkthrough: {
includeDefaults: true,
files: [
{
name: 'invoke_model_with_guardrails.py',
description: 'Houses the logic of the application, including the prompt formatting logic and Amazon Bedrock API invocations.',
},
],
},
extraSteps: [
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
region_name=<REGION>
guardrail_identifier=<Guardrail_Identifier>
guardrail_version=<Guardrail_Version> (this is just a number i.e. 1,2,3 etc...)`,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Image Generation POC',
pocPackageName: 'amazon-bedrock-image-generation-poc',
additionalDeps: ['pillow'],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to implement an image generation use case. The application is constructed with a simple streamlit frontend where users can input text requests to generate images based on the text input.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI to create images based on text input requests.
This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.`,
architectureImage: true,
flowSteps: [
'The user inputs a text request asking to generate an image. (`app.py`)',
'The streamlit app takes the text input and invokes Amazon Bedrock to generate an image (`image_generation.py`)',
'The image created by Amazon Bedrock is returned and displayed on the streamlit app (`app.py`)',
],
},
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Knowledgebases RAG POC',
pocPackageName: 'amazon-bedrock-knowledgebases-rag-poc',
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI to create vector embeddings for your data sources using Amazon Bedrock Knowledge bases with the ability to ask questions against the stored documents. The application is constructed with a RAG based architecture where users can ask questions against the Knowledge bases.',
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock LangChain RAG POC',
pocPackageName: 'amazon-bedrock-langchain-rag-poc',
additionalDeps: [
'langchain@^0.1',
'langchain-community',
'langchain-experimental',
'python-dotenv',
],
pocDescription:
'This is sample code demonstrating the use of Amazon Bedrock and Generative AI using Langchain as orchestrator with the ability to ask questions against the stored documents. This sample uses Knowledge bases to retrieve the stored documents; however, you can extend or update this sample to retrieve your stored documents from any Vector DB.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock and generative AI using Langchain as orchestrator to create RAG based applications.
This repo comes with a basic frontend to help users stand up a proof of concept in just a few minutes.`,
architectureImage: true,
flowSteps: [
'The user makes a request to the GenAI app (`app.py`)',
'The app issues a get contexts query to the Amazon Bedrock Knowledge bases using Langchain based on the user request. (`query_with_langchain.py`)',
'The knowledge base returns search results related to the relevant documents from the ingested data. (`query_with_langchain.py`)',
'The app sends the user request along with the data retrieved from the Amazon Bedrock Knowledge bases as context in the LLM prompt to an LLM available within Bedrock using Langchain. (`query_with_langchain.py`)',
'The LLM returns a succinct response to the user request based on the retrieved data. (`query_with_langchain.py`)',
'The response from the LLM is sent back to the user. (`app.py`)',
],
},
extraSteps: [
{
instructions: `Now that we have successfully cloned the repo, created and activated the virtual environment and installed the necessary dependencies, it is time for us to create Amazon Bedrock Knowledge base.
To create our Amazon Bedrock Knowledge base we will:
1. Go to the Amazon Bedrock Service homepage within the AWS console and on the left-hand side we will select "Knowledge bases" under the "Orchestration" drop down ![Alt text](images/amazon_bedrock_homepage.png "Amazon Bedrock Homepage")
2. We will then click on "Create knowledge base" ![Alt text](images/knowledgeBase_homepage.png "Amazon Bedrock Create Knowledge base")
3. In the Knowledge base details section, you can optionally change the default name and provide a description for your knowledge base. In the IAM permissions section, choose an AWS Identity and Access Management (IAM) role that provides Amazon Bedrock permission to access other AWS services. You can let Amazon Bedrock create the service role or choose a custom role that you have created. Optionally, add tags to your knowledge base. Select Next. ![Alt text](images/kb_first_page.png "Knowledge base details")
4. On the Set up data source page, provide the information for the data source to use for the knowledge base: Optionally, change the default Data source name. Provide the S3 URI of the object containing the files for the data source that you prepared. Select Next. ![Alt text](images/kb_datasource_page.png "Set up Data Source")
5. In the Embeddings model section, choose a supported embeddings model to convert your data into vector embeddings for the knowledge base. In the Vector database section, choose Quick create a new vector store and select Next ![Alt text](images/kb_vectordb_page.png "Select Embeddings Model")
6. On the Review and create page, check the configuration and details of your knowledge base. Choose Edit in any section that you need to modify. When you are satisfied, select Create knowledge base.`,
},
{
instructions: 'Create a .env file in the root of this repo. Within the .env file you just created you will need to configure the .env to contain:',
command: `profile_name=<AWS_CLI_PROFILE_NAME>
knowledge_base_id=<Knowledge Base Id of the Knowledge Base we created in the previous step>
llm_model=<LLM model that you want to use for the POC, either "amazon-titan" or "anthropic-claude">`,
},
{
instructions: 'Depending on the region and model that you are planning to use Amazon Bedrock in, you may need to reconfigure lines 19 and 20 in the query_with_langchain.py file to change the region:',
command: `bedrock = boto3.client('bedrock-runtime', 'us-east-1')
bedrock_agent_runtime = boto3.client('bedrock-agent-runtime','us-east-1')`,
},
{
instructions: `Since this repository is configured to leverage Amazon Titan or Anthropic Claude 3 models, the prompt payload is structured in formats required for the invocation of these two models.
If you wanted to leverage other Amazon Bedrock models, you can update \`query_with_langchain.py\` code.
For example, if you want to call Amazon Titan Lite instead of Amazon Titan Express, you can update the call_titan function in \`query_with_langchain.py\` to look like the code below.\n
You can then change the model_id param value to the other available models from Amazon Titan.
This repository is configured to leverage Knowledge bases.
If you want to use other Vector DBs that are not supported in Amazon Bedrock Knowledge Bases, or want to directly retrieve contexts from the Vector DB using langchain, you can refer to [this Langchain documentation](https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/).
`,
command: `def call_titan(query, retriever):
"""
This function is used to call Amazon Titan Express LLM model using Langchain.
:param query: Contains the Question asked by the user
:param retriever: Contains the contexts retrieved from the Amazon Bedrock Knowledge base
:return: Response received from the LLM for the input user query
"""
# Setting Model kwargs
model_kwargs = {
"maxTokenCount": 4096,
"stopSequences": [],
"temperature": 0,
"topP": 1,
}
# Setting LLM method from the Language Bedrock library
llm = Bedrock(
client=bedrock, model_id="amazon.titan-text-lite-v1", model_kwargs=model_kwargs # updating the model_id param to Amazon Titan Lite
)
# Invoke Amazon Titan using the Langchain llm method
qa = RetrievalQA.from_chain_type(
llm=llm, retriever=retriever, return_source_documents=True
)
answer = qa(query)
# Returning the response
return answer `,
},
],
},
}));
pythonPocs.push(new StreamlitQuickStartPOC({
parentProject: project,
pocName: 'Amazon Bedrock Meeting Minutes Summarization POC',
pocPackageName: 'amazon-bedrock-meeting-minutes-summarization-poc',
additionalDeps: ['requests'],
pocDescription:
'This application demonstrates using Amazon Bedrock and Amazon Transcribe to summarize meeting recordings. The streamlit frontend allows users to upload audio, video, or text files of a meeting recording. Amazon Transcribe generates a transcript of the recording and sends it to Amazon Bedrock for summarization of the key discussion points. Users can then download the generated summarized meeting notes.',
readme: {
pocGoal: {
overview: `The goal of this repo is to provide users the ability to use Amazon Bedrock and Amazon Transcribe to create meeting minutes from audio and video recordings.
It showcases the capability to upload audio or video of a meeting recording and create a summary of the meeting.`,
architectureImage: true,
flowSteps: [
'The user uploads a meeting recording video, audio, or .txt file using the Upload File button.',
'If the meeting recording is already present in Amazon Transcribe job history, the transcription text is retrieved from the job history.',
'If the meeting recording is not present in Amazon Transcribe job history, the recording file is temporarily uploaded to S3 and sent to an Amazon Transcribe job to generate the transcription text.',
'The transcription text is sent to an Amazon Bedrock LLM for summarization.',
'The summarized meeting notes are displayed in the streamlit app.',
'The user can download the meeting notes.',
],
},
additionalPrerequisits: [
'Access to Amazon Transcribe via your CLI Credentials',
'Access to an S3 bucket with put, get, and delete object permissions via your CLI credentials, accessible by Transcribe',
],
extraSteps: [