Mirror of https://git.mirrors.martin98.com/https://github.com/langgenius/dify.git (synced 2025-04-22 21:59:55 +08:00)
Add bedrock (#2119)
Co-authored-by: takatost <takatost@users.noreply.github.com>
Co-authored-by: Garfield Dai <dai.hai@foxmail.com>
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Charlie.Wei <luowei@cvte.com>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: Benjamin <benjaminx@gmail.com>
parent a18dde9b0d · commit 14a2eeba0c
@@ -0,0 +1,14 @@
<svg width="140" height="24" viewBox="0 0 140 24" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- 11 <path> elements and 1 <line> (large provider logo artwork, dark #252F3E lettering with an orange #FF9900 swoosh); raw coordinate data omitted -->
</svg>
After | Size: 12 KiB
@@ -0,0 +1,15 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- 16x16 provider icon: rounded rectangle filled with a green linear gradient (#055F4E to #56C0A7), a white glyph path, and a clipPath; raw coordinate data omitted -->
</svg>
After | Size: 3.2 KiB
api/core/model_runtime/model_providers/bedrock/bedrock.py · 30 lines · Normal file
@@ -0,0 +1,30 @@
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class BedrockProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: dict) -> None:
        """
        Validate provider credentials.

        If validation fails, raise an exception.

        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
        """
        try:
            model_instance = self.get_model_instance(ModelType.LLM)

            # Use the `amazon.titan-text-lite-v1` model for validation
            model_instance.validate_credentials(
                model='amazon.titan-text-lite-v1',
                credentials=credentials
            )
        except CredentialsValidateFailedError as ex:
            raise ex
        except Exception as ex:
            logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
            raise ex
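For orientation, a minimal sketch of driving this check directly. The credential keys follow the credential_form_schemas in bedrock.yaml below; the environment variable names are an assumption borrowed from the integration tests further down and are not part of this commit.

import os

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider

provider = BedrockProvider()
try:
    # Keys mirror the `variable` names declared in bedrock.yaml; values are assumed env vars.
    provider.validate_provider_credentials(credentials={
        "aws_access_key_id": os.getenv("AWS_ACCESS_KEY"),
        "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"),
        "aws_region": os.getenv("AWS_REGION", "us-east-1"),
    })
except CredentialsValidateFailedError as ex:
    print(f"Bedrock credentials rejected: {ex}")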
api/core/model_runtime/model_providers/bedrock/bedrock.yaml · 71 lines · Normal file
@@ -0,0 +1,71 @@
provider: bedrock
label:
  en_US: AWS
description:
  en_US: AWS Bedrock's models.
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#FCFDFF"
help:
  title:
    en_US: Get your Access Key and Secret Access Key from AWS Console
  url:
    en_US: https://console.aws.amazon.com/
supported_model_types:
  - llm
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: aws_access_key_id
      required: true
      label:
        en_US: Access Key
        zh_Hans: Access Key
      type: secret-input
      placeholder:
        en_US: Enter your Access Key
        zh_Hans: 在此输入您的 Access Key
    - variable: aws_secret_access_key
      required: true
      label:
        en_US: Secret Access Key
        zh_Hans: Secret Access Key
      type: secret-input
      placeholder:
        en_US: Enter your Secret Access Key
        zh_Hans: 在此输入您的 Secret Access Key
    - variable: aws_region
      required: true
      label:
        en_US: AWS Region
        zh_Hans: AWS 地区
      type: select
      default: us-east-1
      options:
        - value: us-east-1
          label:
            en_US: US East (N. Virginia)
            zh_Hans: US East (N. Virginia)
        - value: us-west-2
          label:
            en_US: US West (Oregon)
            zh_Hans: US West (Oregon)
        - value: ap-southeast-1
          label:
            en_US: Asia Pacific (Singapore)
            zh_Hans: Asia Pacific (Singapore)
        - value: ap-northeast-1
          label:
            en_US: Asia Pacific (Tokyo)
            zh_Hans: Asia Pacific (Tokyo)
        - value: eu-central-1
          label:
            en_US: Europe (Frankfurt)
            zh_Hans: Europe (Frankfurt)
        - value: us-gov-west-1
          label:
            en_US: AWS GovCloud (US-West)
            zh_Hans: AWS GovCloud (US-West)
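A short sketch of where these three credential variables end up at runtime, assuming nothing beyond what llm.py below already does: the selected aws_region feeds the botocore Config, and the two secret-input values are passed straight to boto3. The literal values here are placeholders.

import boto3
from botocore.config import Config

# Placeholder credentials dict shaped by the credential_form_schemas above.
credentials = {
    "aws_access_key_id": "AKIA...",
    "aws_secret_access_key": "...",
    "aws_region": "us-east-1",
}

# Mirrors the client construction in BedrockLargeLanguageModel._generate (llm.py below).
runtime_client = boto3.client(
    service_name="bedrock-runtime",
    config=Config(region_name=credentials["aws_region"]),
    aws_access_key_id=credentials["aws_access_key_id"],
    aws_secret_access_key=credentials["aws_secret_access_key"],
)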
@@ -0,0 +1,10 @@
- amazon.titan-text-express-v1
- amazon.titan-text-lite-v1
- anthropic.claude-instant-v1
- anthropic.claude-v1
- anthropic.claude-v2
- anthropic.claude-v2:1
- cohere.command-light-text-v14
- cohere.command-text-v14
- meta.llama2-13b-chat-v1
- meta.llama2-70b-chat-v1
@@ -0,0 +1,47 @@
model: ai21.j2-mid-v1
label:
  en_US: J2 Mid V1
model_type: llm
model_properties:
  mode: completion
  context_size: 8191
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: maxTokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
  - name: count_penalty
    label:
      en_US: Count Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 1
  - name: presence_penalty
    label:
      en_US: Presence Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 5
  - name: frequency_penalty
    label:
      en_US: Frequency Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 500
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
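Read together with _create_payload in llm.py below, these rules produce a Jurassic request body of roughly the following shape; the values are illustrative examples, not defaults from this file.

# Illustrative ai21 request body assembled by _create_payload (llm.py below).
payload = {
    "temperature": 0.7,
    "topP": 0.9,
    "maxTokens": 512,
    "prompt": "Hello World!",   # flattened by _convert_messages_to_prompt
    "stopSequences": "\n\n",    # Jurassic models accept only a single stop sequence
}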
@@ -0,0 +1,47 @@
model: ai21.j2-ultra-v1
label:
  en_US: J2 Ultra V1
model_type: llm
model_properties:
  mode: completion
  context_size: 8191
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: maxTokens
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
  - name: count_penalty
    label:
      en_US: Count Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 1
  - name: presence_penalty
    label:
      en_US: Presence Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 5
  - name: frequency_penalty
    label:
      en_US: Frequency Penalty
    required: false
    type: float
    default: 0
    min: 0
    max: 500
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
@@ -0,0 +1,25 @@
model: amazon.titan-text-express-v1
label:
  en_US: Titan Text G1 - Express
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: maxTokenCount
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 8000
pricing:
  input: '0.0008'
  output: '0.0016'
  unit: '0.001'
  currency: USD
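With _create_payload in llm.py below, the Titan parameters are wrapped in textGenerationConfig and the conversation is flattened into inputText; an illustrative body (values are examples only):

# Illustrative amazon (Titan) request body assembled by _create_payload (llm.py below).
payload = {
    "textGenerationConfig": {
        "temperature": 0.5,
        "topP": 0.9,
        "maxTokenCount": 1024,
        "stopSequences": ["User:"],   # "User:" is always prepended to caller-supplied stop words
    },
    "inputText": "\n\nUser: Hello World! \n\nBot:",   # from _convert_messages_to_prompt
}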
@@ -0,0 +1,25 @@
model: amazon.titan-text-lite-v1
label:
  en_US: Titan Text G1 - Lite
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: maxTokenCount
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
pricing:
  input: '0.0003'
  output: '0.0004'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,35 @@
model: anthropic.claude-instant-v1
label:
  en_US: Claude Instant V1
model_type: llm
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: topK
    label:
      zh_Hans: 取样数量
      en_US: Top K
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
    default: 250
    min: 0
    max: 500
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.0008'
  output: '0.0024'
  unit: '0.001'
  currency: USD
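Against _create_payload in llm.py below, these rules yield a Claude request of roughly this shape; the parameter keys simply mirror the rule names above (anthropic payloads are a passthrough of model_parameters), and the values are illustrative.

# Illustrative anthropic (Claude) request body assembled by _create_payload (llm.py below).
payload = {
    "temperature": 0.5,
    "topP": 0.999,
    "topK": 250,
    "max_tokens_to_sample": 4096,
    "prompt": "\n\nHuman: Hello World! \n\nAssistant:",   # from _convert_messages_to_prompt
    "stop_sequences": ["\n\nHuman:"],                      # always prepended to caller stop words
}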
@@ -0,0 +1,35 @@
model: anthropic.claude-v1
label:
  en_US: Claude V1
model_type: llm
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: topK
    label:
      zh_Hans: 取样数量
      en_US: Top K
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
    default: 250
    min: 0
    max: 500
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.008'
  output: '0.024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,35 @@
model: anthropic.claude-v2
label:
  en_US: Claude V2
model_type: llm
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: topK
    label:
      zh_Hans: 取样数量
      en_US: Top K
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
    default: 250
    min: 0
    max: 500
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.008'
  output: '0.024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,35 @@
model: anthropic.claude-v2:1
label:
  en_US: Claude V2.1
model_type: llm
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: topP
    use_template: top_p
  - name: topK
    label:
      zh_Hans: 取样数量
      en_US: Top K
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
    default: 250
    min: 0
    max: 500
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.008'
  output: '0.024'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,35 @@
model: cohere.command-light-text-v14
label:
  en_US: Command Light Text V14
model_type: llm
model_properties:
  mode: completion
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: p
    use_template: top_p
  - name: k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
    min: 0
    max: 500
    default: 0
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.0003'
  output: '0.0006'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,32 @@
model: cohere.command-text-v14
label:
  en_US: Command Text V14
model_type: llm
model_properties:
  mode: completion
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.0015'
  output: '0.0020'
  unit: '0.001'
  currency: USD
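For the Command models, _create_payload in llm.py below passes the parameters through unchanged, sends only the first prompt message's content, and adds a stream flag; an illustrative body (values are examples):

# Illustrative cohere request body assembled by _create_payload (llm.py below).
payload = {
    "temperature": 0.5,
    "top_p": 0.9,
    "top_k": 0,
    "max_tokens_to_sample": 256,
    "prompt": "Hello World!",   # only prompt_messages[0].content is used
    "stream": True,
}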
api/core/model_runtime/model_providers/bedrock/llm/llm.py · 486 lines · Normal file
@@ -0,0 +1,486 @@
import json
import logging
from typing import Generator, List, Optional, Union

import boto3
from botocore.config import Config
from botocore.exceptions import (ClientError, EndpointConnectionError, NoRegionError,
                                 ServiceNotInRegionError, UnknownServiceError)

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessage,
                                                           PromptMessageTool, SystemPromptMessage, UserPromptMessage)
from core.model_runtime.errors.invoke import (InvokeAuthorizationError, InvokeBadRequestError, InvokeConnectionError,
                                              InvokeError, InvokeRateLimitError, InvokeServerUnavailableError)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

logger = logging.getLogger(__name__)


class BedrockLargeLanguageModel(LargeLanguageModel):

    def _invoke(self, model: str, credentials: dict,
                prompt_messages: list[PromptMessage], model_parameters: dict,
                tools: Optional[list[PromptMessageTool]] = None, stop: Optional[List[str]] = None,
                stream: bool = True, user: Optional[str] = None) \
            -> Union[LLMResult, Generator]:
        """
        Invoke large language model

        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        :return: full response or stream response chunk generator result
        """
        # invoke model
        return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user)

    def get_num_tokens(self, model: str, credentials: dict, messages: list[PromptMessage] | str,
                       tools: Optional[list[PromptMessageTool]] = None) -> int:
        """
        Get number of tokens for given prompt messages

        :param model: model name
        :param credentials: model credentials
        :param messages: prompt messages or message string
        :param tools: tools for tool calling
        :return: number of tokens
        """
        prefix = model.split('.')[0]

        if isinstance(messages, str):
            prompt = messages
        else:
            prompt = self._convert_messages_to_prompt(messages, prefix)

        return self._get_num_tokens_by_gpt2(prompt)

    def _convert_messages_to_prompt(self, model_prefix: str, messages: list[PromptMessage]) -> str:
        """
        Format a list of messages into a full prompt for the model

        :param messages: List of PromptMessage to combine.
        :return: Combined string with necessary human_prompt and ai_prompt tags.
        """
        messages = messages.copy()  # don't mutate the original list

        text = "".join(
            self._convert_one_message_to_text(message, model_prefix)
            for message in messages
        )

        return text.rstrip()

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            ping_message = UserPromptMessage(content="ping")
            self._generate(model=model,
                           credentials=credentials,
                           prompt_messages=[ping_message],
                           model_parameters={},
                           stream=False)

        except ClientError as ex:
            error_code = ex.response['Error']['Code']
            full_error_msg = f"{error_code}: {ex.response['Error']['Message']}"

            raise CredentialsValidateFailedError(str(self._map_client_to_invoke_error(error_code, full_error_msg)))

        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    def _convert_one_message_to_text(self, message: PromptMessage, model_prefix: str) -> str:
        """
        Convert a single message to a string.

        :param message: PromptMessage to convert.
        :return: String representation of the message.
        """
        if model_prefix == "anthropic":
            human_prompt_prefix = "\n\nHuman:"
            human_prompt_postfix = ""
            ai_prompt = "\n\nAssistant:"

        elif model_prefix == "meta":
            human_prompt_prefix = "\n[INST]"
            human_prompt_postfix = "[\\INST]\n"
            ai_prompt = ""

        elif model_prefix == "amazon":
            human_prompt_prefix = "\n\nUser:"
            human_prompt_postfix = ""
            ai_prompt = "\n\nBot:"

        else:
            human_prompt_prefix = ""
            human_prompt_postfix = ""
            ai_prompt = ""

        content = message.content

        if isinstance(message, UserPromptMessage):
            message_text = f"{human_prompt_prefix} {content} {human_prompt_postfix}"
        elif isinstance(message, AssistantPromptMessage):
            message_text = f"{ai_prompt} {content}"
        elif isinstance(message, SystemPromptMessage):
            message_text = content
        else:
            raise ValueError(f"Got unknown type {message}")

        return message_text

    def _convert_messages_to_prompt(self, messages: List[PromptMessage], model_prefix: str) -> str:
        """
        Format a list of messages into a full prompt for the Anthropic, Amazon and Llama models

        :param messages: List of PromptMessage to combine.
        :return: Combined string with necessary human_prompt and ai_prompt tags.
        """
        if not messages:
            return ''

        messages = messages.copy()  # don't mutate the original list
        if not isinstance(messages[-1], AssistantPromptMessage):
            messages.append(AssistantPromptMessage(content=""))

        text = "".join(
            self._convert_one_message_to_text(message, model_prefix)
            for message in messages
        )

        # trim off the trailing ' ' that might come from the "Assistant: "
        return text.rstrip()

    def _create_payload(self, model_prefix: str, prompt_messages: list[PromptMessage], model_parameters: dict,
                        stop: Optional[List[str]] = None, stream: bool = True):
        """
        Create payload for bedrock api call depending on model provider
        """
        payload = dict()

        if model_prefix == "amazon":
            payload["textGenerationConfig"] = {**model_parameters}
            payload["textGenerationConfig"]["stopSequences"] = ["User:"] + (stop if stop else [])

            payload["inputText"] = self._convert_messages_to_prompt(prompt_messages, model_prefix)

        elif model_prefix == "ai21":
            payload["temperature"] = model_parameters.get("temperature")
            payload["topP"] = model_parameters.get("topP")
            payload["maxTokens"] = model_parameters.get("maxTokens")
            payload["prompt"] = self._convert_messages_to_prompt(prompt_messages, model_prefix)

            # jurassic models only support a single stop sequence
            if stop:
                payload["stopSequences"] = stop[0]

            if model_parameters.get("presencePenalty"):
                payload["presencePenalty"] = {model_parameters.get("presencePenalty")}
            if model_parameters.get("frequencyPenalty"):
                payload["frequencyPenalty"] = {model_parameters.get("frequencyPenalty")}
            if model_parameters.get("countPenalty"):
                payload["countPenalty"] = {model_parameters.get("countPenalty")}

        elif model_prefix == "anthropic":
            payload = {**model_parameters}
            payload["prompt"] = self._convert_messages_to_prompt(prompt_messages, model_prefix)
            payload["stop_sequences"] = ["\n\nHuman:"] + (stop if stop else [])

        elif model_prefix == "cohere":
            payload = {**model_parameters}
            payload["prompt"] = prompt_messages[0].content
            payload["stream"] = stream

        elif model_prefix == "meta":
            payload = {**model_parameters}
            payload["prompt"] = self._convert_messages_to_prompt(prompt_messages, model_prefix)

        else:
            raise ValueError(f"Got unknown model prefix {model_prefix}")

        return payload

    def _generate(self, model: str, credentials: dict,
                  prompt_messages: list[PromptMessage], model_parameters: dict,
                  stop: Optional[List[str]] = None, stream: bool = True,
                  user: Optional[str] = None) -> Union[LLMResult, Generator]:
        """
        Invoke large language model

        :param model: model name
        :param credentials: credentials kwargs
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        :return: full response or stream response chunk generator result
        """
        client_config = Config(
            region_name=credentials["aws_region"]
        )

        runtime_client = boto3.client(
            service_name='bedrock-runtime',
            config=client_config,
            aws_access_key_id=credentials["aws_access_key_id"],
            aws_secret_access_key=credentials["aws_secret_access_key"]
        )

        model_prefix = model.split('.')[0]
        payload = self._create_payload(model_prefix, prompt_messages, model_parameters, stop, stream)

        # need a workaround for ai21 models, which don't support streaming
        if stream and model_prefix != "ai21":
            invoke = runtime_client.invoke_model_with_response_stream
        else:
            invoke = runtime_client.invoke_model

        try:
            response = invoke(
                body=json.dumps(payload),
                modelId=model,
            )
        except ClientError as ex:
            error_code = ex.response['Error']['Code']
            full_error_msg = f"{error_code}: {ex.response['Error']['Message']}"
            raise self._map_client_to_invoke_error(error_code, full_error_msg)

        except (EndpointConnectionError, NoRegionError, ServiceNotInRegionError) as ex:
            raise InvokeConnectionError(str(ex))

        except UnknownServiceError as ex:
            raise InvokeServerUnavailableError(str(ex))

        except Exception as ex:
            raise InvokeError(str(ex))

        if stream:
            return self._handle_generate_stream_response(model, credentials, response, prompt_messages)

        return self._handle_generate_response(model, credentials, response, prompt_messages)

    def _handle_generate_response(self, model: str, credentials: dict, response: dict,
                                  prompt_messages: list[PromptMessage]) -> LLMResult:
        """
        Handle llm response

        :param model: model name
        :param credentials: credentials
        :param response: response
        :param prompt_messages: prompt messages
        :return: llm response
        """
        response_body = json.loads(response.get('body').read().decode('utf-8'))

        finish_reason = response_body.get("error")

        if finish_reason is not None:
            raise InvokeError(finish_reason)

        # get output text and calculate num tokens based on model / provider
        model_prefix = model.split('.')[0]

        if model_prefix == "amazon":
            output = response_body.get("results")[0].get("outputText").strip('\n')
            prompt_tokens = response_body.get("inputTextTokenCount")
            completion_tokens = response_body.get("results")[0].get("tokenCount")

        elif model_prefix == "ai21":
            output = response_body.get('completions')[0].get('data').get('text')
            prompt_tokens = len(response_body.get("prompt").get("tokens"))
            completion_tokens = len(response_body.get('completions')[0].get('data').get('tokens'))

        elif model_prefix == "anthropic":
            output = response_body.get("completion")
            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
            completion_tokens = self.get_num_tokens(model, credentials, output if output else '')

        elif model_prefix == "cohere":
            output = response_body.get("generations")[0].get("text")
            prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
            completion_tokens = self.get_num_tokens(model, credentials, output if output else '')

        elif model_prefix == "meta":
            output = response_body.get("generation").strip('\n')
            prompt_tokens = response_body.get("prompt_token_count")
            completion_tokens = response_body.get("generation_token_count")

        else:
            raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response")

        # construct assistant message from output
        assistant_prompt_message = AssistantPromptMessage(
            content=output
        )

        # calculate usage
        usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)

        # construct response
        result = LLMResult(
            model=model,
            prompt_messages=prompt_messages,
            message=assistant_prompt_message,
            usage=usage,
        )

        return result

    def _handle_generate_stream_response(self, model: str, credentials: dict, response: dict,
                                         prompt_messages: list[PromptMessage]) -> Generator:
        """
        Handle llm stream response

        :param model: model name
        :param credentials: credentials
        :param response: response
        :param prompt_messages: prompt messages
        :return: llm response chunk generator result
        """
        model_prefix = model.split('.')[0]
        if model_prefix == "ai21":
            response_body = json.loads(response.get('body').read().decode('utf-8'))

            content = response_body.get('completions')[0].get('data').get('text')
            finish_reason = response_body.get('completions')[0].get('finish_reason')

            prompt_tokens = len(response_body.get("prompt").get("tokens"))
            completion_tokens = len(response_body.get('completions')[0].get('data').get('tokens'))
            usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
            yield LLMResultChunk(
                model=model,
                prompt_messages=prompt_messages,
                delta=LLMResultChunkDelta(
                    index=0,
                    message=AssistantPromptMessage(content=content),
                    finish_reason=finish_reason,
                    usage=usage
                )
            )
            return

        stream = response.get('body')
        if not stream:
            raise InvokeError('No response body')

        index = -1
        for event in stream:
            chunk = event.get('chunk')

            if not chunk:
                exception_name = next(iter(event))
                full_ex_msg = f"{exception_name}: {event[exception_name]['message']}"

                raise self._map_client_to_invoke_error(exception_name, full_ex_msg)

            payload = json.loads(chunk.get('bytes').decode())

            model_prefix = model.split('.')[0]
            if model_prefix == "amazon":
                content_delta = payload.get("outputText").strip('\n')
                finish_reason = payload.get("completion_reason")

            elif model_prefix == "anthropic":
                content_delta = payload.get("completion")
                finish_reason = payload.get("stop_reason")

            elif model_prefix == "cohere":
                content_delta = payload.get("text")
                finish_reason = payload.get("finish_reason")

            elif model_prefix == "meta":
                content_delta = payload.get("generation").strip('\n')
                finish_reason = payload.get("stop_reason")

            else:
                raise ValueError(f"Got unknown model prefix {model_prefix} when handling stream response")

            index += 1

            assistant_prompt_message = AssistantPromptMessage(
                content=content_delta if content_delta else '',
            )

            if not finish_reason:
                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=index,
                        message=assistant_prompt_message
                    )
                )

            else:
                # get num tokens from metrics in last chunk
                prompt_tokens = payload["amazon-bedrock-invocationMetrics"]["inputTokenCount"]
                completion_tokens = payload["amazon-bedrock-invocationMetrics"]["outputTokenCount"]

                # transform usage
                usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)

                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=index,
                        message=assistant_prompt_message,
                        finish_reason=finish_reason,
                        usage=usage
                    )
                )

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        The key is the error type thrown to the caller
        The value is the error type thrown by the model,
        which needs to be converted into a unified error type for the caller.

        :return: Invoke error mapping
        """
        return {
            InvokeConnectionError: [],
            InvokeServerUnavailableError: [],
            InvokeRateLimitError: [],
            InvokeAuthorizationError: [],
            InvokeBadRequestError: []
        }

    def _map_client_to_invoke_error(self, error_code: str, error_msg: str) -> type[InvokeError]:
        """
        Map client error to invoke error

        :param error_code: error code
        :param error_msg: error message
        :return: invoke error
        """
        if error_code == "AccessDeniedException":
            return InvokeAuthorizationError(error_msg)
        elif error_code in ["ResourceNotFoundException", "ValidationException"]:
            return InvokeBadRequestError(error_msg)
        elif error_code in ["ThrottlingException", "ServiceQuotaExceededException"]:
            return InvokeRateLimitError(error_msg)
        elif error_code in ["ModelTimeoutException", "ModelErrorException", "InternalServerException", "ModelNotReadyException"]:
            return InvokeServerUnavailableError(error_msg)
        elif error_code == "ModelStreamErrorException":
            return InvokeConnectionError(error_msg)

        return InvokeError(error_msg)
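A small worked example of the prompt flattening above, using only _convert_messages_to_prompt and _convert_one_message_to_text as defined in this file:

from core.model_runtime.entities.message_entities import UserPromptMessage

llm = BedrockLargeLanguageModel()
prompt = llm._convert_messages_to_prompt([UserPromptMessage(content="Hello World!")], "anthropic")
# -> "\n\nHuman: Hello World! \n\nAssistant:"
# An empty AssistantPromptMessage is appended automatically, and the trailing space
# after "Assistant:" is removed by rstrip().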
@@ -0,0 +1,23 @@
model: meta.llama2-13b-chat-v1
label:
  en_US: Llama 2 Chat 13B
model_type: llm
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_gen_len
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
pricing:
  input: '0.00075'
  output: '0.00100'
  unit: '0.001'
  currency: USD
@@ -0,0 +1,23 @@
model: meta.llama2-70b-chat-v1
label:
  en_US: Llama 2 Chat 70B
model_type: llm
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_gen_len
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
pricing:
  input: '0.00195'
  output: '0.00256'
  unit: '0.001'
  currency: USD
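For the Llama 2 chat models, _create_payload in llm.py above passes these parameters straight through and wraps the user turn in [INST] tags; an illustrative body (values are examples):

# Illustrative meta (Llama 2) request body assembled by _create_payload (llm.py above).
payload = {
    "temperature": 0.5,
    "top_p": 0.9,
    "max_gen_len": 512,
    "prompt": "\n[INST] Hello World! [\\INST]",   # from _convert_messages_to_prompt
}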
api/tests/integration_tests/model_runtime/bedrock/test_llm.py · 117 lines · Normal file
@@ -0,0 +1,117 @@
import os
from typing import Generator

import pytest
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.bedrock.llm.llm import BedrockLargeLanguageModel


def test_validate_credentials():
    model = BedrockLargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='meta.llama2-13b-chat-v1',
            credentials={
                'anthropic_api_key': 'invalid_key'
            }
        )

    model.validate_credentials(
        model='meta.llama2-13b-chat-v1',
        credentials={
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
        }
    )


def test_invoke_model():
    model = BedrockLargeLanguageModel()

    response = model.invoke(
        model='meta.llama2-13b-chat-v1',
        credentials={
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
        },
        prompt_messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.0,
            'top_p': 1.0,
            'max_tokens_to_sample': 10
        },
        stop=['How'],
        stream=False,
        user="abc-123"
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0


def test_invoke_stream_model():
    model = BedrockLargeLanguageModel()

    response = model.invoke(
        model='meta.llama2-13b-chat-v1',
        credentials={
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
        },
        prompt_messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.0,
            'max_tokens_to_sample': 100
        },
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)

    for chunk in response:
        print(chunk)
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True


def test_get_num_tokens():
    model = BedrockLargeLanguageModel()

    num_tokens = model.get_num_tokens(
        model='meta.llama2-13b-chat-v1',
        credentials={
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
        },
        messages=[
            SystemPromptMessage(
                content='You are a helpful AI assistant.',
            ),
            UserPromptMessage(
                content='Hello World!'
            )
        ]
    )

    assert num_tokens == 18
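These are live integration tests; a hedged reminder of the environment they assume, taken from the os.getenv calls above (the values shown are placeholders, not working keys):

import os

os.environ.setdefault("AWS_REGION", "us-east-1")
os.environ.setdefault("AWS_ACCESS_KEY", "AKIA...")            # placeholder
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "wJalr...")    # placeholder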
@@ -0,0 +1,21 @@
import os

import pytest
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider


def test_validate_provider_credentials():
    provider = BedrockProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(
            credentials={}
        )

    provider.validate_provider_credentials(
        credentials={
            "aws_region": os.getenv("AWS_REGION"),
            "aws_access_key": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY")
        }
    )