ai-tech / dify / Commits

Commit 5397799a (Unverified)
Authored Aug 24, 2023 by Jyong; committed by GitHub on Aug 24, 2023
Parent: 8e837dde

document limit (#999)

Co-authored-by: jyong <jyong@dify.ai>

Showing 2 changed files with 57 additions and 7 deletions (+57, -7):

  api/controllers/console/datasets/datasets_document.py  (+37, -4)
  api/services/dataset_service.py  (+20, -3)
api/controllers/console/datasets/datasets_document.py (view file @ 5397799a)
@@ -3,7 +3,7 @@ import random
 from datetime import datetime
 from typing import List
-from flask import request
+from flask import request, current_app
 from flask_login import current_user
 from core.login.login import login_required
 from flask_restful import Resource, fields, marshal, marshal_with, reqparse
@@ -275,7 +275,8 @@ class DatasetDocumentListApi(Resource):
         parser.add_argument('duplicate', type=bool, nullable=False, location='json')
         parser.add_argument('original_document_id', type=str, required=False, location='json')
         parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
         parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
         args = parser.parse_args()
         if not dataset.indexing_technique and not args['indexing_technique']:
@@ -335,7 +336,8 @@ class DatasetInitApi(Resource):
         parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
         parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
         parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
         parser.add_argument('doc_language', type=str, default='English', required=False, nullable=False, location='json')
         args = parser.parse_args()
         try:
@@ -483,7 +485,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
         indexing_runner = IndexingRunner()
         try:
             response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
                                                               data_process_rule_dict, None, dataset_id)
         except LLMBadRequestError:
             raise ProviderNotInitializeError(
                 f"No Embedding Model available. Please configure a valid provider "
@@ -855,6 +857,14 @@ class DocumentStatusApi(DocumentResource):
             if not document.archived:
                 raise InvalidActionError('Document is not archived.')
+
+            # check document limit
+            if current_app.config['EDITION'] == 'CLOUD':
+                documents_count = DocumentService.get_tenant_documents_count()
+                total_count = documents_count + 1
+                tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
+                if total_count > tenant_document_count:
+                    raise ValueError(f"All your documents have overed limit {tenant_document_count}.")
             document.archived = False
             document.archived_at = None
             document.archived_by = None
@@ -872,6 +882,10 @@ class DocumentStatusApi(DocumentResource):
 class DocumentPauseApi(DocumentResource):
+    @setup_required
+    @login_required
+    @account_initialization_required
     def patch(self, dataset_id, document_id):
         """pause document."""
         dataset_id = str(dataset_id)
@@ -901,6 +915,9 @@ class DocumentPauseApi(DocumentResource):
 class DocumentRecoverApi(DocumentResource):
+    @setup_required
+    @login_required
+    @account_initialization_required
     def patch(self, dataset_id, document_id):
         """recover document."""
         dataset_id = str(dataset_id)
@@ -926,6 +943,21 @@ class DocumentRecoverApi(DocumentResource):
         return {'result': 'success'}, 204
+
+
+class DocumentLimitApi(DocumentResource):
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def get(self):
+        """get document limit"""
+        documents_count = DocumentService.get_tenant_documents_count()
+        tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
+        return {
+            'documents_count': documents_count,
+            'documents_limit': tenant_document_count
+        }, 200
+
 api.add_resource(GetProcessRuleApi, '/datasets/process-rule')
 api.add_resource(DatasetDocumentListApi,
                  '/datasets/<uuid:dataset_id>/documents')
@@ -951,3 +983,4 @@ api.add_resource(DocumentStatusApi,
                  '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/status/<string:action>')
 api.add_resource(DocumentPauseApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause')
 api.add_resource(DocumentRecoverApi, '/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume')
+api.add_resource(DocumentLimitApi, '/datasets/limit')
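The commit also registers a new console endpoint, GET /datasets/limit, served by DocumentLimitApi, which returns the tenant's current document count and its configured limit. A minimal client-side sketch of calling it is shown below; the base URL, the /console/api prefix, and the bearer-token header are deployment-specific assumptions used only for illustration and are not part of this commit.

    import requests

    # Assumed console API location and auth; adjust for your deployment.
    CONSOLE_API = "http://localhost:5001/console/api"
    HEADERS = {"Authorization": "Bearer <console-access-token>"}

    # GET /datasets/limit is handled by the new DocumentLimitApi resource.
    resp = requests.get(f"{CONSOLE_API}/datasets/limit", headers=HEADERS)
    resp.raise_for_status()

    data = resp.json()
    print(f"{data['documents_count']} of {data['documents_limit']} documents used")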
api/services/dataset_service.py (view file @ 5397799a)
@@ -394,11 +394,20 @@ class DocumentService:
     def save_document_with_dataset_id(dataset: Dataset, document_data: dict,
                                       account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
                                       created_from: str = 'web'):
         # check document limit
         if current_app.config['EDITION'] == 'CLOUD':
+            count = 0
+            if document_data["data_source"]["type"] == "upload_file":
+                upload_file_list = document_data["data_source"]["info_list"]['file_info_list']['file_ids']
+                count = len(upload_file_list)
+            elif document_data["data_source"]["type"] == "notion_import":
+                notion_page_list = document_data["data_source"]['info_list']['notion_info_list']['pages']
+                count = len(notion_page_list)
             documents_count = DocumentService.get_tenant_documents_count()
+            total_count = documents_count + count
             tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
-            if documents_count > tenant_document_count:
+            if total_count > tenant_document_count:
                 raise ValueError(f"over document limit {tenant_document_count}.")
         # if dataset is empty, update dataset data_source_type
         if not dataset.data_source_type:
@@ -649,12 +658,20 @@ class DocumentService:
     @staticmethod
     def save_document_without_dataset_id(tenant_id: str, document_data: dict, account: Account):
+        count = 0
+        if document_data["data_source"]["type"] == "upload_file":
+            upload_file_list = document_data["data_source"]["info_list"]['file_info_list']['file_ids']
+            count = len(upload_file_list)
+        elif document_data["data_source"]["type"] == "notion_import":
+            notion_page_list = document_data["data_source"]['info_list']['notion_info_list']['pages']
+            count = len(notion_page_list)
         # check document limit
         if current_app.config['EDITION'] == 'CLOUD':
             documents_count = DocumentService.get_tenant_documents_count()
+            total_count = documents_count + count
             tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
-            if documents_count > tenant_document_count:
-                raise ValueError(f"over document limit {tenant_document_count}.")
+            if total_count > tenant_document_count:
+                raise ValueError(f"All your documents have overed limit {tenant_document_count}.")

         embedding_model = ModelFactory.get_embedding_model(
             tenant_id=tenant_id
         )
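Both service methods now count the incoming documents from the data_source payload and add them to the tenant's existing total before comparing against TENANT_DOCUMENT_COUNT, instead of checking the existing total alone. A standalone sketch of that corrected check follows; the helper name and parameters are illustrative assumptions and do not appear in the commit.

    def check_document_limit(document_data: dict, existing_count: int, tenant_limit: int) -> None:
        """Illustrative mirror of the limit check added in this commit (not part of the diff)."""
        count = 0
        source = document_data["data_source"]
        if source["type"] == "upload_file":
            count = len(source["info_list"]["file_info_list"]["file_ids"])
        elif source["type"] == "notion_import":
            count = len(source["info_list"]["notion_info_list"]["pages"])

        # New documents are added to the existing total before the comparison,
        # so a batch that would push the tenant past its limit is rejected up front.
        if existing_count + count > tenant_limit:
            raise ValueError(f"over document limit {tenant_limit}.")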