Why do my retrieval results get much worse when I add the prompt only to my query, yet improve a lot once I add the query_prompt to both the query and the documents? This seems very counterintuitive.
For each query, my test set contains 1 positive and 99 negatives; my question is as stated in the title.
My code is as follows:
import json
from pathlib import Path
from transformers import AutoModel, AutoTokenizer
import numpy as np
import torch
import torch.nn.functional as F
import time
model_name = "..."
device = '...'
max_seq_len = 8192
embedding_batch_size = 32
similarity_batch_size = 96
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    trust_remote_code=True,
).half().eval().to(device)
def last_token_pool(last_hidden_states,
                    attention_mask):
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    else:
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
def get_detailed_instruct(task_description, query, is_english=True):
    if is_english:
        return f'Instruct: {task_description}\nQuery: {query}'
    else:
        return f'指令:给定一个网页搜索查询,检索出能够回答该查询的相关段落\n查询:{query}'
task = 'Given a web search query, retrieve relevant passages that answer the query'
def get_embeddings_transformer(texts, is_query=False, batch_size=32, is_english=True):
    all_embeddings = []
    if is_query:
        processed_texts = [get_detailed_instruct(task, text, is_english) for text in texts]
    else:
        processed_texts = texts
    model.eval()
    with torch.no_grad():
        for i in range(0, len(processed_texts), batch_size):
            batch = processed_texts[i:i+batch_size]
            inputs = tokenizer(
                batch,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=max_seq_len  # Use the defined max length
            )
            inputs = {k: v.to(device) for k, v in inputs.items()}
            outputs = model(**inputs)
            batch_embeddings = last_token_pool(outputs.last_hidden_state, inputs['attention_mask'])
            batch_embeddings = F.normalize(batch_embeddings, p=2, dim=1)
            all_embeddings.append(batch_embeddings.cpu())  # Move to CPU
            del inputs, outputs, batch_embeddings
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
    return torch.cat(all_embeddings, dim=0)
# --- Load Data ---
en_path = 'val_embedding_dataset_en.json'
print(f"Loading data from: {en_path}")
with open(en_path, 'r', encoding='utf-8') as f:
    data_en = json.load(f)
querys_en = list(data_en.keys())
# --- Pre-calculate Query Embeddings ---
print("Calculating query embeddings...")
# Use the modified function, specifying is_query=True and batch size
query_en_embeddings = get_embeddings_transformer(
    querys_en,
    is_query=True,
    batch_size=embedding_batch_size
)
print(f"Calculated {len(query_en_embeddings)} query embeddings.")
# --- Prepare Document Lists ---
all_results_en = []
for query in querys_en:
    docs_data = data_en[query]  # Rename variable to avoid conflict
    positive_docs = [doc['content'] for doc in docs_data if doc['answer']]
    negative_docs = [doc['content'] for doc in docs_data if not doc['answer']]
    all_results_en.append((query, positive_docs, negative_docs))
# --- Process and Rank (Main loop structure remains the same) ---
output_file_en = Path("rank_results_en_gteqwen2_bothprompt.json")  # Slightly different name
print(f"Output will be saved to: {output_file_en}")
if output_file_en.exists():
    try:
        with open(output_file_en, 'r', encoding='utf-8') as f:
            rank_results_en = json.load(f)
        print(f"Loaded {len(rank_results_en)} existing results.")
    except json.JSONDecodeError:
        print("Error reading existing results file, starting fresh.")
        rank_results_en = []
else:
    rank_results_en = []
start_idx = len(rank_results_en)
print(f"Starting processing from query index {start_idx}")
for idx, (query, pos_docs, neg_docs) in enumerate(all_results_en):
    if idx < start_idx:
        continue
    print(f"\nProcessing Query {idx+1}/{len(all_results_en)}: {query[:80]}...")
    try:
        start_time = time.time()
        docs = pos_docs + neg_docs  # Keep this name for the combined list
        if not docs:
            print("Warning: No documents found for this query. Skipping.")
            rank_results_en.append({
                "query": query,
                "ranked_docs": [],
                "positive_ranks": [],
                "scores": []
            })
            continue
        # --- Calculate Document Embeddings ---
        # Use the modified function, is_query=False (default), and batch size
        doc_embeddings_cpu = get_embeddings_transformer(
            docs,
            is_query=False,  # With False (the official usage), my results are worse than some smaller models; after changing False to True, the results improved a lot
            batch_size=embedding_batch_size
        )
        # --- Calculate Similarities (Your batching logic here) ---
        query_embedding_gpu = query_en_embeddings[idx].unsqueeze(0).to(device)
        sim_scores = []
        print(f"  Calculating similarities for {len(docs)} documents...")
        for i in range(0, len(doc_embeddings_cpu), similarity_batch_size):
            batch_embeddings_gpu = doc_embeddings_cpu[i:i+similarity_batch_size].to(device)
            # Calculate cosine similarity on GPU (works fine with normalized embeddings)
            with torch.no_grad():
                batch_scores = F.cosine_similarity(
                    query_embedding_gpu,
                    batch_embeddings_gpu,
                    dim=1
                ).cpu().tolist()
            sim_scores.extend(batch_scores)
            del batch_embeddings_gpu, batch_scores
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        # --- Rank Documents ---
        rank_indices = np.argsort(sim_scores)[::-1]
        ranked_docs = [docs[i] for i in rank_indices]
        ranked_scores = [sim_scores[i] for i in rank_indices]
        pos_ranks = []
        for pos_doc in pos_docs:
            try:
                rank = ranked_docs.index(pos_doc) + 1
                pos_ranks.append(rank)
            except ValueError:
                print(f"  Warning: Positive document not found in ranked list for query {idx+1}.")
                pos_ranks.append(-1)
        # --- Store Results ---
        rank_results_en.append({
            "query": query,
            "ranked_docs": ranked_docs,  # Optionally exclude to save space
            "positive_ranks": pos_ranks,
            "scores": ranked_scores
        })
        # --- Save Progress ---
        with open(output_file_en, 'w', encoding='utf-8') as f:
            json.dump(rank_results_en, f, ensure_ascii=False, indent=2)
        # --- Cleanup ---
        del docs, doc_embeddings_cpu, sim_scores, ranked_docs, pos_ranks, ranked_scores, query_embedding_gpu
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        time_used = time.time() - start_time
        print(f"  Finished Query {idx+1}. Time taken: {time_used:.2f}s")
    except Exception as e:
        print(f"  Error processing query {idx+1} ({query[:80]}...): {str(e)}")
        # Consider saving progress even on error
        with open(output_file_en, 'w', encoding='utf-8') as f:
            json.dump(rank_results_en, f, ensure_ascii=False, indent=2)
        continue
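In case it helps reproduce the issue, here is a stripped-down sketch of the exact comparison I am making (it reuses the tokenizer, model, last_token_pool, get_detailed_instruct and task defined above; the passage string is just a placeholder taken from my data):

def embed(texts):
    with torch.no_grad():
        inputs = tokenizer(texts, return_tensors="pt", padding=True,
                           truncation=True, max_length=max_seq_len)
        inputs = {k: v.to(device) for k, v in inputs.items()}
        emb = last_token_pool(model(**inputs).last_hidden_state, inputs['attention_mask'])
        return F.normalize(emb, p=2, dim=1)

query = "how to solved issues in the claims problem. "
passage = "Second, we associate each streaming problem with a claims problem, ..."

q = embed([get_detailed_instruct(task, query)])             # query always gets the instruction
d_plain = embed([passage])                                  # official usage: plain document
d_prompted = embed([get_detailed_instruct(task, passage)])  # document with the same prompt

print("sim(query, plain doc):   ", F.cosine_similarity(q, d_plain).item())
print("sim(query, prompted doc):", F.cosine_similarity(q, d_prompted).item())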
Would it be convenient to share the test data? We'll take a look at the cause.
This is private data, so I'll share just one example. The query is "how to solved issues in the claims problem. "
The docs are listed below; the 1st is the positive and the other 99 are negatives (due to the comment length limit I can only include part of them; if you need more complete test data, please leave an email address). Following the official usage, the positive ranked 12th, but once I also added the prompt to my docs, it ranked 1st. This doesn't happen on every example, but over the ten queries I tested, the average MRR improved from 0.4743 to 0.6958 (computed as in the sketch after the doc list):
[
"Second, we associate each streaming problem with a claims problem, where the users are identified as the issues in the claims problem. Then, problems can be solved in two stages. Either focussing on each issue (user) first and agents (artists) afterwards, or viceversa. We show (Theorem 3) how the indices we consider for streaming problems can be rationalized as two-stage claims rules. More precisely, the pro-rata index and the user-centric index can be rationalized as weighted proportional rules (although there is no bijection between this family of rules and the weighted indices for streaming problems). Furthermore, the allcation rules these two indices induce can also be described as two-stage (claims) rules where we first decide the importance of each user and then the importance of each artist for each user, which is computed as the sum over all users.",
"The game-theoretical approach is an indirect way of solving streaming problems. The problem of adjudicating conflicting claims (in short, claims problem) models a basic situation in which an endowment is allocated among agents who have claims on it, and the available amount is not enough to fully honor all claims. This is a classic problem that can be traced back to ancient sources, such as Aristotle and the Talmud, although is formal treatment is somewhat recent (e.g., O'Neill, 1982; Thomson, 2019). Ju et al. 2007) generalize these problems to account for multiple issues. 10 We show that some of the two-stage rules from generalized claims problems can rationalize some of the allocation rules we consider to solve streaming problems. The rest of the paper is organized as follows. In Section 2, we introduce our model and main concepts for our analysis of streaming problems. In Section 3, we present the axiomatic approach to our problems including the main characterization results we obtain. In Section 4, we explore the game-theoretical approach to our problem. In Section 5, we explore another indirect approach to solve our problems based on claims problems. Most of the proofs have been postponed to an appendix.",
"Thomson ( 2019) is an excellent survey with an extensive treatment of the sizable literature emanating from that seminal contribution. 19 The do so mostly thanks to the axiom of reallocation-proofness, which does not have a parallel in this paper. The weighted proportional rule associated to ω assigns, for each problem (N, K, c, E) and each i ∈ N the amount In words, rule P ω first applies the proportional rule to each single-dimensional sub-problem (N, {j} , c .j , E) Calleja et al., (2005) introduce multi-issue allocation situations, which are a particular case of claims problems. 20 Bergantiños et al., (2010, 2011, 2018) consider two-stage rules for claims problems where in the first stage the endowment is divided among the issues and in the second stage the amount assigned to each issue is divided among the agents. The final amount received by each agent is the sum over all issues. Formally, let ψ and φ be two bankruptcy rules. The two-stage rule R ψ,φ the claims rule obtained from the following two-stage procedure: 1. First stage. 2. Second stage. But all those papers take the axiomatic approach inspired on the literature of bankruptcy problems, whch is unrelated to the axiomatic study of this paper.",
"The current level of development of computer technology allows us to significantly expand the class of applied problems which can be solved using simulation methods. Many scientific and technical problems which have been solved analytically are nowadays solved by numerical methods using relevant software for engineering analysis. In experimental studies of transient thermal processes, it is sometimes impossible to conduct direct measurements of required physical quantities, and these characteristics are inferred from the results of indirect measurements. The only way to find the required physical quantities for such problems is to solve the inverse boundary problems for heat conduction. There are a number of applied studies in which it is impossible to determine the initial conditions. The mathematical models of such problems have the form of inverse boundary value problems with unknowns initial data. An important task is to determine the biophysical parameters of biological tissues. For example, in [24] the laws of the propagation of various types of elastic waves in biological tissues in the range of acoustic frequencies are investigated theoretically and experimentally. The contributions of imaginary and real components of the complex modulus elasticity to the speed of elastic waves is analyzed. It is shown that in soft tissues, low-frequency elastic disturbances propagate mainly as transverse waves.",
"In the bankruptcy andreorganization cases of92listedcompanies the bankruptcy andreorganization enterprises dividedthe claims intheir reorganization plans intoemployee claims tax claims property securedclaims ordinary small claims andordinary large claims etc The four types of claims are basically repaidinfull incash andthe claims securedby property may involve the2repayment of retaineddebts while the repayment methods of ordinary large amount claims include cash repayment repayment of retaineddebts repayment of debts with accounts receivable andrelatedparties In addition tocompensation 53listedcompanies mainly repaidthrough debt to equity swaps accounting for58 70%of the total It can be seen that debt to equity swaps are the preferredmethodof debt repayment for most bankrupt companies Judging from the actual practice of swaps from my country s debt toequity the swap from my country s current debt toequity has problems such as being difficult for national andprivate companies that are easy tosign contracts The core of these issues is reflectedinthe planning voting implementation anddisclosure of the swap plan from debt toshares Process Among them swap pricing from debt tostocks is especially important If all parties do not reach agreement on the outcome of reasonable pricing during the trading process",
"Overall, as shown in Coding Issues. To see how many of the unsolved problems are due to coding issues, we check how many of them have the correct description as evaluated by a human, but not have the correct code. This turns out to be 8 out of 61, as shown in Table 2 (See Appendix E for details). This means that if we could learn the primitive/helper functions better and have a wider range to choose from, we can improve solve rate. To solve the rest of the problems, we will have to incorporate better views -it is observed that GPT-4 cannot solve line continuation tasks, especially for diagonal lines, grid manipulation tasks, and symmetry tasks easily, and these could easily be incorporated as additional views. To see how much iterative environmental feedback helps, we look at number of tasks solved with the iterative environment feedback loop. This turns out to be 7 tasks out of 50, as shown in Table 3 (See Appendix E for details). This is quite significant, and highlights the importance of environmental feedback.",
"We examine the problem of two-point boundary optimal control of nonlinear systems over finite-horizon time periods with unknown model dynamics by employing reinforcement learning. We use techniques from singular perturbation theory to decompose the control problem over the finite horizon into two sub-problems, each solved over an infinite horizon. In the process, we avoid the need to solve the time-varying Hamilton-Jacobi-Bellman equation. Using a policy iteration method, which is made feasible as a result of this decomposition, it is now possible to learn the controller gains of both sub-problems. The overall control is then formed by piecing together the solutions to the two sub-problems. We show that the performance of the proposed closed-loop system approaches that of the model-based optimal performance as the time horizon gets long. Finally, we provide three simulation scenarios to support the paper's claims.",
"Previous work [9] shows that k-Longest Path can be solved deterministically in 4.884 k poly(n) time. Combining these results together with the above discussion shows that k-Longest Detour can be solved over undirected graphs deterministically in 4.0817 2k + 4.884 3k/2 poly(n) ≤ 16.661 k poly(n) time, as desired. In this paper, we obtained faster algorithms for k-Detour and k-Longest Detour over undirected graphs. However, many mysteries remain surrounding the true time complexity of these problems. We highlight some open problems of interest, relevant to our work. the algorithm of [8] shows that k-Longest Detour is FPT on any class G of graphs where the 3-Disjoint Paths6 problem can be solved in polynomial time. This implies, for example, that k-Longest Detour is FPT over directed planar graphs (see also [11], which presents a more direct argument showing k-Longest Detour is FPT in directed planar graphs). More recently, [13] showed that k-Longest Detour is FPT on any class G of graphs where the 2-Disjoint Paths problem can be solved in polynomial time.",
"We compare with FEVER and VitaminC to show that the in-the-wild claims and cited articles in WICE constitute diverse and challenging verification problems. We bucket verification problems from these datasets into the following categories. lected from the development sets with these categories for the three datasets. 9 In WICE, we annotated 127 subclaims in 50 claims. Table 3 shows the estimated distribution. We can see that natural claims in WICE involve difficult entailment classification problems often requiring some kind of inference even at the subclaim level. In contrast, relatively few claims in VitaminC involve inference, but mostly require narrower types of reasoning such as calculation. We have three main questions for our dataset. 1) How well do existing NLI models perform offthe-shelf when using the "stretching" paradigm? 2) Does fine-tuning on our dataset improve accuracy? 3) Would being able to retrieve the relevant supporting sentences improve accuracy further? We benchmark the performance of NLI models on WICE in both off-the-shelf and fine-tuned settings. 9 If more than one category applies, we assign the most "difficult" category (latest in our list). Examples are given in Table 13 in the appendix.",
"In this theorem, the claim (i) was proven in [10], and the claim (ii) in [46]. Moreover, the claim (i) was the first result on the wright asymptotic order of sampling n-widths for classes of functions having a mixed smoothness. It is interesting to notice that all these cases are restricted by the strict inequality p < q. From very recent results of [26] on inequality between the linear sampling and Kolmogorov n-widths of the unit ball of a reproducing kernel Hilbert subspace of the space L 2 (Ω; ν) one can immediately deduce the right asymptotic order of ϱ n (W r 2 , L 2 (T d )) which solved the outstanding Open Problem 4.1 in [17] for the particular case p = 2. This open problem is still not solved for the case p ̸ = 2. Unfortunately, even in the solved case p = 2, we do not know any explicit asymptotically optimal linear sampling algorithm since its proof is based on an inequality between the linear sampling and Kolmogorov n-widths. The problem of construction of asymptotically optimal linear sampling algorithms for this case is still open. The following theorem claims an inverse inequality [26, Theorem 1].",
"To obtain reliable and faithful SICE, the term S 1 imposes the structure sparsity on S. controls the trade-off between the amount of sparsity and the log-likelihood estimation. The problem in Eq. 1) is convex and can be solved by the off-the-shelf packages such as GLASSO [10] and CVXPY [8]. However, the objective is non-smooth due to the 1 penalty. The above optimisation packages cannot be used with CNN layers to conduct training with backpropagation. However, based on our investigation, it has the following issues: 1) it cannot efficiently solve large SICE problems, i.e., of size 128×128 or higher; (2) it relies on multiple CPU based libraries including CVXPY to solve the optimisation problem and obtain gradients for backpropagation. This greatly limits its efficiency due to the lack of GPU support. The above limitations motivate us to develop an SICE method suitable for end-to-end training with GPU.",
"For the issues of image contrast, Table 1 shows that there are 71 image issues before repair, and 50 problems were repaired by Iris, including 35 different components. Therefore, the success rate of image contrast is 70.4%. There are still 21 issues that have not been solved for the reason of the shortcomings of using bounds to locate. Finally, the overall success rate of the two types of issues is 91.38%, and the total number of issues repaired is 668. Figure 9 presents several examples of the repair results by Iris. More examples can be found on our website [11]. Apart from evaluating the effectiveness of Iris in the number of repairs, we also record the execution time of in practice. When repairing an APK, Iris will first use Xbot to detect the accessibility issues in it, update the reference DB, and then start to automatically repair. Finally, the average time of issue detection and DB updating is 100.7 seconds, and the average time of repair is 136.2 seconds. Compared with manual repair, Iris can greatly shorten the time of attribute localization and provide a feasible reference value for color replacement. Thus, Iris has high time efficiency.",
"The U.S. Board of Veterans' Appeals1 (BVA) is an administrative body within the U.S. Department of Veterans Affairs (VA) responsible for hearing appeals from veterans who are dissatisfied with decisions made by VA regional offices. The BVA reviews a wide range of issues, including claims for disability compensation, survivor benefits, and other compensation and pension claims. Walker et al. 36] analyzed 50 BVA decisions issued between 2013 and 2017. The decisions were all arbitrarily selected cases dealing with claims by veterans for service-related post-traumatic stress disorder (PTSD). For each decision, the researchers manually extracted sentences addressing the factual issues. The sentences were then manually annotated with rhetorical roles they play in the respective decisions [35]. Figure 1 (left) shows the distribution of the labels.",
"where the last equality is again due to Proposition 3.6. This concludes the proof of (a). To prove (b), we first show that problem (62) has a unique minimizer. Therefore the objective function in ( 62) is strongly convex on R d0 To this end, we notice that Thus 1 |G| g∈G g ⊤ γ * satisfies the constraints in (62). Assume for the sake of contradiction that 1 |G| g∈G g ⊤ γ * = β * , then by the uniqueness of the minimizer of (62) we have where γ := Proj Range(A) β * . We make the following two claims to be proved shortly: Claim 2: Proj Range(A) These two claims combined with (83) imply that γ satisfies the constraint in ( 61) and has a smaller norm compared to γ * , γ < γ * . This contradicts the fact that γ * is the minimizer of problem (61). If we assume in addition that ρ 0 is unitary on X 0 , then and hence problems ( 62) and ( 63) are equivalent. Finally, we prove the above two claims. Hence Proj Range(A) A ⊤ γ * also satisfies the constraint in (61). This concludes the proof of Theorem 4.1.",
"PSS applies the idea of a desk crit from architecture design studios, where instructors give "informal formative assessment" to students through a discussion of their work (Dinham, 1987). In the PSS framework, instructors provide this feedback and assessment through asking unobtrusive, open-end questions (Le Doux and Waller, 2016). For example, the dialog can be initiated by asking questions such as "How are you doing?", "What are you working on now?", or "Are you making progress?" can open a dialog that can provide specific instruction, assistance, or feedback to the pair. The initial question enables the instructor to quickly determine the status of the student pair. From there more intentional and specific questions can be asked to promote deeper thinking (Raths, 1967). These dialogues are effective opportunities to discuss how the students solved a problem, what issues they are stuck on, alternative solutions, get help on practical issues, starting problem solving, conventions regarding design and communication of solutions, ways to an improve a solution, clarify conceptual misunderstandings, and so on. Also, it is natural during these dialogues to enact dynamic scaffolding by adjusting the problem difficultly up or down based on how the pair is performing. Further, these dialogues are a good time for individualized help.",
"The final example in the last section illustrates two key problems in assessing uncertainty in integrated datasets: Coupling and Robustness. The coupling problem is when the value for one row depends on the choice of value for another. The robustness problem is to mitigate the affect of outliers that can affect upper and lower bound calculations. We show that both issues can be elegantly solved with an optimization problem called unbalanced assignment (a generalization of bipartite matching). The set Ψ(𝑟, 𝑠) can be thought of as defining a graph over the entities in 𝑅 and 𝑆. Let 𝑉 𝑅 , 𝑉 𝑆 be defined as the set of all identifying tuples from both 𝑅 and 𝑆 respectively. Every valid augmentation can described as a subgraph of this bipartite graph where each 𝑣 𝑠 ∈ 𝑉 𝑆 has an edge to at most one 𝑣 𝑟 ∈ 𝑉 𝑅 . The existence of such subgraphs gets to the essence of the coupling problem shown by the example in the previous section. If we match one pair 𝑟 [𝐴 𝑖𝑑 ], 𝑠 [𝐵 𝑖𝑑 ] of identifying tuples, it affects how we can match others. Double counting happens because we don't appropriately account for this.",
"Here Sing denotes the set of singular points of the fibration G * ξ which is given by Our problem is therefore equivalent on X \ {f = 0} to the standard factorization problem solved by Ivarsson and the second author in [IK12b]. There, all the required claims of Theorem 1.1 are proved. In the aforementioned paper, the entry f (x) is actually constant 1. However, by rescaling the restriction of the fibration to X \ {f = 0}, one finds the globally integrable vector fields along the fibres spanning the tangent space at every point. The stratification in [IK12b] is given by where a ∈ O(X) is the left upper entry in the matrix We refer to the paper [IK12b] for the details. Observe that on {f = 0} the automorphism G is equal to the identity. Thus, in our local trivialization the entry a of the matrix tends to 1 when approaching the analytic subset {f = 0} of X. Therefore {a = 0} is a closed analytic subset not only of X \ {f = 0}, but also of X. Assuming the results from [IK12b], now we only have to analyze our fibration on {f = 0}.",
"or strong convergence and convergence rate) of these SCP algorithms are expected, and numerical simulations of nonconvex example problems are then needed to validate these theoretical results. In addition to guaranteed convergence, future work will also need to focus on the proof of the exactness of the utilized lossless convexification or convex relaxation techniques from theoretical perspectives by showing that the relaxed problem is equivalent to and share the same solution with the original problem. Moreover, other fundamental issues, including the feasibility of each subproblem parameterized and solved within SCP, the effects of the feasibility of subproblems on the convergence of SCP, the existence of optimal solutions to subproblems, and the quantification of time and space complexity of the problem, are also valuable to be explored to gain more certainty, transparency, and confidence in the performance of the algorithm.",
"Other issues in HFL include aggregation interval and incentive mechanism design. In Ref. 20], a joint resource allocation and aggregation interval control problem is proposed, aiming to minimize the training loss and the latency. Convergence analysis is provided to show the dependency of the convergence performance on the number of participants, the aggregation interval and training latency. Then, the original problem is decomposed into two subproblems. The resource allocation problem is proved to be convex and the optimal value can be reached. For the aggregation interval control problem, a rounding and relaxation approach is adopted. Experimental results show that the proposed scheme can reach lower latency and higher training performance compared with the baselines. In Ref. 21], a two-level joint incentive design and resource allocation problem is proposed. At the lower level, the cluster selection problem is formulated as an evolutionary game. At the upper level, the action of the cluster head is solved via a deep learning-based approach. Experiments show the robustness and uniqueness of the proposed scheme.",
"Moreover, we show that the maximal reach-avoid set can be inner-approximated by reduction to a convex programming problem, which could be solved with on-the-shell SDP solvers. The latter can be reduced to the differential invariant generation problem, which can be well solved by exploiting existing methods for computing differential invariants, e.g., [30,16,48,45]. If safety and liveness properties are considered together, we have to address the following two problems: how to guarantee to reach to the target set in or the must-jump part of the guard of a jump outgoing from a mode while keeping safe until reaching the target or leaving the mode via the jump, and how to avoid the unreachability caused by infinite loops among the modes. The must-jump part of the guard of a jump means the intersection of the guard and the complementation of the domain of the pre-mode of the jump, to which the jump must take place immediately in case a trajectory reaches. The former problem essentially corresponds to a reach-avoid problem, while the latter problem can be solved by searching all simple loops among the modes and blocking them. We implement a prototypical tool and provide several examples to demonstrate the effectiveness and performance of the proposed method.",
"Problem ( 9) is nonconvex since the optimization variables {w k } K k=1 and v are coupled together and multiplied by each others in the SINR constraint in (9b). Therefore, the AO technique is used to tackle this problem by dividing (9) into two sub-problems and solving them iteratively until convergence. In the nth iteration, the optimization is done w.r.t. Then, the problem is solved w.r.t. In the next section, we illustrate the details of the two sub-problems and how they are solved in each iteration of the AO algorithm. In this section, we propose efficient and low complexity solutions to the active and passive beamforming sub-problems of the AO algorithm, when perfect channel state information (CSI) knowledge is available at the BS. Specifically, FP is employed to deal with the SINR constraints in (9b). Then, the active beamforming at BS sub-problem is solved using SDP, while the sub-problem of optimizing the IRS reflection coefficients vector, v, is solved by utilizing SOCP and convexconcave procedure (CCP). We will also show that the proposed SOCP-CCP approach to optimize the IRS vector has much lower complexity than the SDR technique which is widely used in the IRS literature.",
"The pseudocolor plot of η(x) is shown in Figure 4 (B). The infinite domain is truncated to a finite one using the technique of perfectly matched layer (PML). The computational domain is −3 3 × −3 3 with a rectangular PML layer of thickness 0 5 The direct problem is solved by the finite element method FEM and the synthetic data is measured at 32 uniformly distributed points on the circle centered at the origin with radius 2 For any fixed source terms the direct problem is solved for 5 wavenumbers κ j = 0 5 + j π j = 0 1 2 3 4 A schematic of the computational domain for the direct problems is shown in Figure 4 C The computational domain is [−3, 3] × [−3, 3] with a rectangular PML layer of thickness 0.5. The direct problem is solved by the finite element method (FEM) and the synthetic data is measured at 32 uniformly distributed points on the circle centered at the origin with radius 2. For any fixed source terms, the direct problem is solved for 5 wavenumbers κ j = (0.5 + j)π, j = 0, 1, 2, 3, 4. A schematic of the computational domain for the direct problems is shown in Figure 4 (C).",
"According to Theorem 2, we know that the finite-time adaptive H ∞ control problem of the system (1) is solved under the controllers (35) and the MDADT switching scheme (36), i.e., the closed-loop system consisting of (1), (34) and (35) has a finite-time L 2 -gain with the prescribed disturbance attenuation level γ ≈ 1.6881 w.r.t. Now, we verify the above claims by the simulation experiments. In addition, in view of (36), for m ∈ N ∪ {0}, the switching rule is choreographed as When the switching signal δ(t) meets the switching rule (37), the corresponding simulation results is presented in Fig. 2. As shown in Fig. 0, 2, 9, I 2 , 28 25 , δ), i.e., the adaptive mode-dependent state feedback controllers (35) has strong robustness against the disturbance ω(t).",
"However, the data collection in time-varying ocean currents is plagued by two critical issues: AUV yaw and sensor node movement. We propose an adaptive AUV-assisted data collection strategy for ocean currents to address these issues. First, we consider the energy consumption of an AUV in conjunction with the value of information (VoI) over the sensor nodes and formulate an optimization problem to maximize the VoI-energy ratio. The AUV yaw problem is then solved by deriving the AUV's reachable region in different ocean current environments and the optimal cruising direction to the target nodes. Finally, using the predicted VoI-energy ratio, we sequentially design a distributed path planning algorithm to select the next target node for AUV. The simulation results indicate that the proposed strategy can utilize ocean currents to aid AUV navigation, thereby reducing the AUV's energy consumption and ensuring timely data collection.",
"In the following, we first consider a more general CCP formulation that can be solved by the proposed framework. We provide a brief summary of the nonconvex scenario optimization framework [11] to show how this general class of CCPs can be solved via its associated SP. We then present our main results in Section V and Section VI which allows the SP to be solved in closed loop, while maintaining feasibility with respect to the CCP. Section VII shows how to solve Problem 1 by solving its associated SP in an MPC framework. Finally, Sections VIII and IX apply this MPC framework to generate safe motion plans for a robot navigating among pedestrians. NONCONVEX SCENARIO OPTIMIZATION In the following, we summarize the main results of the NSO framework [11] that we use to build our motion planning framework. To this end, consider the following generalization of Problem 1: The constraints g(x, δ) ≤ 0 must be satisfied with a probability of at least 1−ϵ. The main idea of scenario optimization [11] is to solve Problem 2 by imposing deterministic constraints for a set of scenarios ω = {δ (1) , . The number of sampled scenarios is known as the sample size S. The SP associated with Problem 2 is given by Problem 3 (General SP).",
"Thus, these problems can be solved by enumerating objects similar to assignments in finite-domain CSPs, and the question is then whether it is possible to solve the problem faster than exhaustively enumerating all orderings, similar to how it is a major open question whether CNF-SAT is solvable in O * (c n ) time for some c < 2. This is indeed known to be possible for certain reasoning problems, e.g., A, which recently has been solved in O * ((1.0615n) n ) time [11], and if the problem is restricted to intervals of length one then it can even be solved in 2 O(n log log n) time [9], and if no point occurs inside more than k intervals then it can be solved in O * (k n ) time [12]. However, despite these improvements, we are still far away from an unconditional single-exponential O * (c n ) time algorithm and even further away from the best-known lower bounds which only rule out subexponential algorithms running in 2 o(n) time under the exponential-time hypothesis [15]. Hence, cutting-edge research suggests that qualitative reasoning problems in many cases admit significantly improved algorithms even though general single-exponential running times seem to be out of reach with existing methods.",
"Problem Decomposition: When faced with a complex problem, LLMs have shown to benefit from decomposing said problem into smaller, more manageable subproblems (Perez et al., 2020;Huang and Chang, 2022). This technique has been manifested through various prompting strategies, including least-to-most prompting (Zhou et al., 2023;Drozdov et al., 2023), successive prompting (Dua et al., 2022), and decomposed prompting (Khot et al., 2022). ABCD is also a prompt-based problem decomposition technique, but rather than decomposing questions into subquestions, we decompose questions into a series of true/false claims. Claim Decomposition: ABCD is most similar to Chen et al. 2022), who decompose political claims into a series of yes/no questions, and similarly calculate the proportion of questions with "yes" responses. However, using claim decomposition in question answering introduces new challenges, such as linking consistent entities in multi-hop reasoning questions ( §3.1). Further, ABCD is a prompting strategy, while Chen et al. 2022) finetune T5 to decompose claims. Another difference is that we use ABCD to verify LLM answers through self-evaluation, while Chen et al. 2022) build a retrieval system to evaluate their yes/no questions.",
"We obtain the following relationships between the pro-rata index and the user-centric index and some of the rules from the literature on claims problems. Theorem 4 Let (N, M, t) be a streaming problem and (N, K, c, E) be the associated claims problem. 20 Actually the tuple (N, K, c, E) is defined as in claims problems but some additional constraints on c, E and the definition of a rule are added. As our results are not affected by that, we avoit the details. The proof of Theorem 4 can be found in the appendix. From Theorem 4 (a) we obtain that the pro-rata index and the user-centric index can be rationalized as weighted proportional rules. We have seen in Theorem 1 that they are weighted indices. Thus, one might naturally conjecture whether there is a bijection between weighted proportional rules (for claims problems) and weighted indices (for streaming problems). The answer is not. We can indeed find weighted proportional rules that can not be obtained through a weighted index and weighted indices such that the induced rule to allocate awards in streaming problems is not a weighted proportional rule.",
"Theorem 4 (b) and 4 (c) allow us to consider both indices from another perspective. As the statements indicate, the allcation rules they induce (for streaming problems) can actually be described as two-stage (bankruptcy) rules where we first decide the importance of each user an then the importance of each artist for each user, which is computed as the sum over all users. The pro-rata and user-centric indices measure the importance of each artist in different ways. For the latter, all users have the same importance, whereas for the former the importance of each user is proportional to the user's streams. They then measure the importance of each artist for each user in the same way; namely, proportionally to the artists' streams. To conclude with this section, we reiterate that Theorem 4 states that both indices could be rationalized as a combination of two well-known rules from the literature on bankruptcy problems. Besides, both of them can also be seen as members of the same family of weighted proportional rules. Thus, we can conclude that (in contrast with the previous sections) the analysis in this section does not favor one of the indices over the other.",
"Based on the analysis in this section, we can safely state that the game-theoretical approach favors the user-centric index with respect to the pro-rata index. In this section, we consider another (indirect) approach to solve streaming problems, based on claims problems. Claims problems refer to an amount of a homogeneous and infinitely divisible good (e.g., money) to be divided among a set of agents, who have claims on the good. This is certainly the case of the canonical and well-known bankruptcy problem. Some popular rules are the proportional rule, which yields awards proportionally to claims, and the constrained equal awards rule, which equalizes the amount received by each agent as much as possible. Formally, for each (N, c, E) and each i ∈ N, Besides, U i (N, M, t) , the amount received by artist i under the usercentric index, can not be computed through (N, T, m) because it depends on numbers (t ij ) that do not appear in (N, T, m) . We now present an extension of bankruptcy problems, following Ju et al., (2007), that allows us to use the values t ij . Ju et al., (2007) characterize several families of rules. 19 One of the families is formally defined as follows.",
"We note that the rewards induced by the pro-rata index could be outside the core. To do so, consider for instance Example 1 and S = {1} . On the other hand, by definition, the user-centric index yields for each problem (N, M, t) an allocation that belongs to A (N, M, t). Thus, by Theorem 2, the rewards induced by the user-centric index always belong to the core. More generally, we introduce the axiom stating that the rewards generated by an index should always lie within the core of the associated cooperative game. As the next result states, core selection actually characterizes the user-centric index, when combined with the axioms of homogeneity and additivity (already introduced in the previous section), provided we restrict to the following relevant subdomain of streaming problems. More precisely, let P * be the set of all problems (with at least three users) where no user has played content from all the artists. The proof of Theorem 3 can be found in the appendix. 17 Nevertheless, one could extend Theorem 3 to the full domain P upon simply adding an axiom of independence of null artists, stating that removing an artist with no streamings does not change the value of the index for the remaining artists.",
"The game-theoretical approach is an indirect way of solving streaming problems. The problem of adjudicating conflicting claims (in short, claims problem) models a basic situation in which an endowment is allocated among agents who have claims on it, and the available amount is not enough to fully honor all claims. This is a classic problem that can be traced back to ancient sources, such as Aristotle and the Talmud, although is formal treatment is somewhat recent (e.g., O'Neill, 1982; Thomson, 2019). Ju et al. 2007) generalize these problems to account for multiple issues. 10 We show that some of the two-stage rules from generalized claims problems can rationalize some of the allocation rules we consider to solve streaming problems. The rest of the paper is organized as follows. In Section 2, we introduce our model and main concepts for our analysis of streaming problems. In Section 3, we present the axiomatic approach to our problems including the main characterization results we obtain. In Section 4, we explore the game-theoretical approach to our problem. In Section 5, we explore another indirect approach to solve our problems based on claims problems. Most of the proofs have been postponed to an appendix.",
"Thomson ( 2019) is an excellent survey with an extensive treatment of the sizable literature emanating from that seminal contribution. 19 The do so mostly thanks to the axiom of reallocation-proofness, which does not have a parallel in this paper. The weighted proportional rule associated to ω assigns, for each problem (N, K, c, E) and each i ∈ N the amount In words, rule P ω first applies the proportional rule to each single-dimensional sub-problem (N, {j} , c .j , E) Calleja et al., (2005) introduce multi-issue allocation situations, which are a particular case of claims problems. 20 Bergantiños et al., (2010, 2011, 2018) consider two-stage rules for claims problems where in the first stage the endowment is divided among the issues and in the second stage the amount assigned to each issue is divided among the agents. The final amount received by each agent is the sum over all issues. Formally, let ψ and φ be two bankruptcy rules. The two-stage rule R ψ,φ the claims rule obtained from the following two-stage procedure: 1. First stage. 2. Second stage. But all those papers take the axiomatic approach inspired on the literature of bankruptcy problems, whch is unrelated to the axiomatic study of this paper.",
"Notice that under pro-rata the importance of each user to all artists, namely i∈N w (j, t. Under user-centric all users have the same importance, namely i∈N w (j, t. j ) t ij = 1. We can consider weight systems reflecting that all users have a minimum importance and also that users with more streamings contribute more, but with an upper bound. Notice that ω (j, x) does not depend on j. Thus, we only distinguish users through the streaming times. More precisely, all users with a number of streams below α have the same importance as in the user-centric index. Finally, users with a number of streams above β have the same importance, given by the thresholds α and We now revisit Example 1, adding a new user (c) with 5 streams for artist 1 and 35 streams for artist 2. Besides, let α = 20 and β = 60. Then, the weigthed index I α,β defined as above is In this case, P (N, M, t) = (15, 125) whereas U (N, M, t) = (1.125, 1.875). Then, the rewards induced for artists are We observe that I α,β yields an allocation in between those the pro-rata and user-centric yield.",
"Our analysis should contribute to the debate between the pro-rata and user-centric methods in the music industry. Nevertheless, the discussion in that industry nowadays goes beyond the debate between those two methods. For instance, the French streaming service Deezer claims to be pioneer in fair payments to artist, "being a main advocate for a re-evaluation of music streaming's economic model". In March 2023, Deezer announced an initiative with Universal Music Group, the world leader in music-based entertainment, to explore new streaming models that better align the interests of artists, fans and streaming services. Using deep data analysis, this partnership aims to improve the fairness of the current streaming model in various ways, whether by helping artists monetize their music better or by eliminating issues within the current system. This initiative will not prioritize just the most-streamed artists on the platform, but will level the playing field for artists at every stage of their career and benefit the wider music community as a whole. 21 We believe that some of these goals might be achieved with other members of the more general family of weigthed indices we characterize in this paper. As we mentioned above, weigthed indices are precisely constructed on the premise that each artist is assigned the weighted aggregation of streamings, accross users, with the weight depending on the user and her streaming profile. a) We first prove that each weighted index satisfies the two axioms.",
"Let t = (t ij ) i∈N,j∈M denote the corresponding matrix encompassing all playing times. 11 The set of problems so defined is denoted by P. 10 Csóka and Herings (2018, 2021) also studied recently a different generalization of claims problems to deal with financial networks, as pioneered by Eisenberg and Noe (2001). See also Calleja and Llerena (2023). 11 As mentioned at the Introduction, and for ease of notation, we normalize the amount paid by each user to 1. Thus, the amount to be divided among artists in a problem (N, M, t) is just m, the number of users. We define the set of fans of each artist as the set of users who have played content from the artist at least once. Similarly, we define the list of artists of a user as those from which the user has played content at least once. We illustrate our model with a basic example that will surface throughout our ensuing analysis. Assume two users (a, b) join a platform to listen to their favorite artists (1,2). Assume artist 1 is listened by user a, whereas artist 2 is listened by user b. This situation can be included in our theoretical model as follows. A popularity index (I) for streaming problems is a mapping that measures the importance of each artist in each problem.",
"In this section, we take the axiomatic approach to solve streaming problems. That is, we formalize axioms of indices that model principles with normative (ethical or operational) appeal. Some of them will echo the concern that artists are paid fairly. 13 Some others are inspired from related discussions in the music industry or from the literature on resource allocation. Then, the index should preserve that ratio. The second axiom is also a standard axiom in resource allocation and says that if we can divide a problem in the sum of two smaller problems, then the solution to the original problem should be the sum of the solutions in the two smaller problems. The intuition in streaming problems is the following. Then, we can reward artists in two ways. First, we consider all countries in the same market and we allocate artists according with the streams in all countries. Thus, each artist receives an allocation in each country according with the streams in this country. The total allocation to an artist is the sum over all countries. Additivity says that both ways should coincide. We now consider two alternative forms of modeling the impact of extra users. We can then consider that both users are similar for artist i. Then, both users should have the same impact over this artist. Equal individual impact of similar users. It is also often argued that as all users pay the same, all users should have the same impact on the index.",
"We now consider the related case of coalition formation in surplus sharing problems. Surplus sharing problems (Moulin, 2002) are opposite to rationing problems in the sense that endowments exceed the sum of claims. We define some focal surplus sharing rules. We start with the counterpart family of parametric rules in this setting (Moulin, 1987b). The family of parametric rules includes some well-known rules such as the proportional rule, P , the uniform gains rule, U G, and the equal surplus rule, ES (see Thomson, 2019). 13 All parametric rules for surplus sharing problems satisfy solidarity. Thus, our Theorem 1 guarantees that the induced coalition formation problems are non-circular and, as our Corollary 1 states, that the core is not empty.14 However, there are also (non-parametric) rules for surplus sharing problems that do not satisfy solidarity. For instance, let us consider the following extension of the random arrival rule from rationing problems to surplus sharing problems. First, award all agents their claims as many times as the endowment allows. Then, assign the residual endowment (if it exists) sequentially according to an ordering of the agents. The extended random arrival for surplus sharing problems (ERA) gives each agent the average payoff over all possible orderings.",
"The closest paper to ours is Alaei et al. They also consider the same streaming platforms that we consider here, which generate revenues by charging users a subscription fee for unlimited access to the content and compensate artists through an allocation rule. They also assume that users are heterogeneous in both their overall consumption and the distribution of their consumption over different artists, but they model this by referring to the probability (per usage) that each user type wants to consume the content of each artist. In our case, we talk about the number of times a user plays an artist. But, leaving this minor aspect aside, our models are essentially equivalent. They are also concerned with the pro-rata and user-centric revenue allocation methods, but focus on characterizing when these two methods can sustain a set of artists on the platform, as well as comparing them from both the platform's and the artists' perspectives. In particular, they show that, despite the cross-subsidization between low-and high-streaming-volume users, the pro-rata method can be preferred by both the platform and the artists. 3 More precisely, they show that artists who are predominantly listened to by users whose overall streaming volume (consumption) is high receive higher payments with the pro-rata allocation than with the user-centric allocation.",
"We take several approaches to analyze our model. In the first (axiomatic) approach, we present axioms that formalize normatively appealing principles for popularity indices. Some convey structural ideas that reflect operational features of the index. For instance, additivity says that if we can present a problem as the sum of smaller problems (such as in, say, multinational platforms), then the index in the original problem should coincide with the sum of the indices in the smaller problems. And homogeneity says that the index should reflect accordingly the cases in which each user has reproduced content from a given artist a certain times more than content from another artist. Other axioms (equal individual impact of similar users and equal global impact of users) model alternative forms of marginalism, i.e., the impact of extra users in the platform. And we also consider axioms reflecting concerns for fairmess: reasonable lower bounds and click-fraud-proofness. The first one states that artists should at least receive the amount paid by the users that only played content provided by them. The second one states that if a user changes streaming times, then the amount received by each artist could not change more than the subscription paid by that user. We explore how the main indices perform with respect to all these axioms, but our main result in this axiomatic approach to the problem (Theorem 1) states three characterization results.",
"We have analyzed in this paper the problem of sharing the revenues raised from subscription fees to music platforms among participating artists. Our analysis has highlighted two central methods (pro-rata and usercentric) which can actually be seen as focal (and somewhat polar) members of a family of methods which evaluate artists by the weighted aggregation of users' streaming choices. The weight assigned to each user might actually depend on the user herself and her whole streaming profile. We therefore provide a solid common ground for both methods, in the form of the characterization result for the whole family. We, nevertheless, provide additional (normative, as well as game-theoretical) arguments to favor the user-centric method with respect to the pro-rata method. To wit, we show that the former satisfies two natural and appealing axioms (reasonable lower bound and click-fraud proofness) that the second violates. They are somewhat connected to a feature the second exhibits (whereas the first does not); namely, cross-subsidization between high-and low-streamingvolume users. Furthermore, the former satisfies core-selection, while the latter does not (which implies that it does not guarantee allocations preventing incentives for artist to leave the platform)",
"That is, pro-rata gives to artist 2 much more than to artist 1, whereas the user-centric gives the same to both artists (which sounds more reasonable in this example). This might illustrate why some platforms are moving from pro-rata to user-centric. 12 There is also room for alternative payment schemes beyond the previous two. For instance, Meyn et al.., (2022) write "remuneration based on quality ratings, or a combination of user-centric and pro-rata remuneration". And, also, "in addition to pro-rata and user-centric, the distribution parameters (e.g., the unit of distribution) must be investigated further". Partly accounting for this motivation, we now introduce a family of indices that allow us to consider alternative payment schemes compromising between the previous two. The index of each artist is obtained as the sum, over all users, of the streams of the user weighted by a factor that depends on the user (allowing, for instance, for popular users to have more importance than unknown users) and the streaming profile of the user (allowing, for instance, for more active users to have more importance than less active users).",
"Pycia (2012) shows that the Nash bargaining solution guarantees a nonempty core of the induced coalition formation problem. As the Nash bargaining solution satisfies solidarity, we know from our Theorem 1 that it induces non-circular coalition formation problems and, thus, stability is guaranteed. Pycia ( 2012) also shows that the core of the induced coalition formation problem from the Kalai-Smorodinsky bargaining solution can be empty for some coalitional endowments. As the Kalai-Smorodinsky bargaining solution fails to satisfy solidarity, we know from our Theorem 1 that it does not always induce non-circular coalition formation problems. As we mentioned in the Introduction, our results generalize those obtained by Gallo and Inarra (2018) to any resource allocation situation (beyond rationing problems). We analyze in this subsection how our results apply to that particular case of rationing problems. These problems pertain to situations where agents have claims over the endowment that cannot be fully honored and sharing rules take those claims into account to yield the allocations.9 Formally, let N = {1, . n} and let A focal family of sharing rules for rationing problems is the so-called family of parametric rules (Young, 1987).",
"2022) argue that "streaming services generated $4.3 billion in the first half of 2019 (with 77% of that coming from paid subscriptions)". Users typically pay a fixed (monthly) amount to freely access their libraries. A common practice for platforms is to distribute around 70% of the revenue received from subscriptions among artists (e.g., Meyn et al., 2023). Platforms also raise money from other sources (for instance, advertisements) but the most important source are subscriptions and we shall concentrate on them here. An ensuing interesting problem is to allocate the corresponding part of those revenues among participating artists, based on their streaming times. This will be the object of study in this paper. Based on this input, a popularity index, which measures the importance of each artist, is constructed. The reward received by each artist from the revenues generated in each problem is based on such a popularity index. The most frequent indices are the so called pro-rata index, which renders artists rewarded proportionally to the total number of streams and the so called user-centric index, which renders artists rewarded so that the revenue generated by each user is divided among the artists listened by the user proportionally to the total number of streams. 2 Then, the amount received by each artist is computed by adding the amounts obtained from each user.",
"In many economic models, agents are characterized by different features (such as utility functions, claims, or ranking positions) that could be taken into account to distribute a given endowment among them. Our results show that, regardless of these characteristics, as long as the sharing rule satisfies solidarity, it will induce a coalition formation problem with a non-empty core. In this section, we first relate our results to existing results for bargaining problems (Pycia, 2012) and rationing problems (Gallo and Inarra, 2018), and we then develop two novel applications for surplus sharing problems and ranking problems. Finally, we illustrate how our results can be applied to resource allocation situations with permissible coalitions. We first consider the case of coalition formation in bargaining problems introduced by Pycia (2012). In this model, agents have utility functions which may be taken into account by the rule to get the final allocations. The two focal rules in this model are the so-called Nash bargaining solution, N , and Kalai-Smorodinsky bargaining solution, KS. The first one maximizes the product of agents' utilities (see Nash, 1950). The second one equalizes the relative gains -the gain of each player relative to its maximum possible gain -and maximizes this equal value (see Kalai and Smorodinsky, 1975).",
"second,establish a review method with non-active review as the principleandactive review as an exception,andstrengthen the prevention of false litigation.Through these improvement measures,it is hoped to provide a reference for promoting the rationalizationandstandardization of court judgments injudicial practice on the determination of the natureandvalidity of debt inkind.Keywords:Debt inkind; agreements inkind; agreements on promises; autonomy of intentions; 然而,由于现行以物抵债规定分布零散不成体系,最高院也未发布专门的司法解释,导致各地法院对以物抵债的性质及效力存在不同认识甚至出现争议与困惑,最终演化成司法实践中的问题。 例如“通州建总案”中法官将以物抵债认定为诺成合同但却又附加额外条件允许满足一定条件的合同为要物合同,造成前后矛盾; 再例如“上海索朗案”中法官对以物抵债盲目适用让与担保的效力规则认定该以物抵债协议无效,忽略当事人的意思表示。 涉及以物抵债纠纷的案件多集中于借款合同纠纷、买卖合同纠纷等,司法实践中对于部分以物抵债纠纷普遍存在裁判尺度不一、同案不同判现象。",
"关键词:保证债务诉讼时效; 主债务诉讼时效; 一般保证; 连带责任保证; 独立性ABSTRACTAbout deal with the relationship between the guarantee debt in with the mainstatute of limitations in the"security law"and"the Supreme People's Court on theapplicable<security law of the People's Republic of China>,the explanation of someissues of had rules,however,after the lapse of the above-mentioned law,thisprovision has not been absorbed by the Civil Codeandrelated judicial interpretations.Under the background of the change of oldandnew laws,how to deal with therelationship between guarantee debtandlimitation of action of main debt is aninevitable problem.In discussing the relationship between the two,this paperconcludes that the principle of independence should be applied to the limitation ofaction of guaranteed debt.Specifically,the limitation of action for surety debt must bestarted separatelyandnot be disturbed by the calculation of the beginning oflimitation of principal debt.To ensure that the limitation of debt will not expire due tothe expiration of the limitation of main debt,the right to raise defenseandthe right toabandon defense do not affect each other;",
"由频数统计结果可以得出,调查对象对投诉反映灵敏度满意度主要集中于“比较不满意”等级和“一般”等级,分别有11个和110个,占比都在42%以上; 评价“非常满意”和“非常不满意”的企业数都为4个,占比极小。 综上所述,大部分失业保险的受众企业对失业保险经办服务投诉反映灵敏度满意度主要集中一般及以下水平,非正向评价占比为87.4%,说明当前受众企业对失业保险经办服务投诉反映灵敏度是相当不满意的,失业保险经办机构处理投诉的办法值得商榷。 众数为2,持“比较不满意”态度的调查企业数最多; 评价“非常满意”和“非常不满意”的企业数为分别为3个和5个,占比较小。 综上所述,大部分失业保险的受众企业对失业保险经办机构的改进服务满意度主要集中一般及以下水平,非正向评价占比为92%,说明当前公众对失业保险经办机构的改进服务是相当不满意的。 面对存在的问题,失业保险经办机构在及时第三章四川省成都市失业保险管理的现状55完善流程,提升服务水平方面还有所欠缺。 众数为3,持“一般”态度的调查企业数最多;",
"针对公司存在的这些问题,本人会探究这些问题产生的原因是什么。 然后通过行业比较分析、调查研究、案例分析等手段,提出相应的解决措施。 最后,提出相应的保障措施来确保解决措施顺利执行。 本论文的主要研究内容如下:第一章为本论文的绪论部分。 绪论部分主要介绍本论文的摘要、论文题目的来源,论文写作的目的,所选择的论文题目在国内外的相关研究现状,以及论文研究的主要研究内容和研究方法。 紧接着针对公司目前客户服务的现状,找出公司目前在客户服务方面所存在的一些问题。 对于这些问题,在本章的结尾部分也对问题产生的原因进行了一定的分析。 首先先介绍针对与客户服务优化方案的总体原则,即客户服务满意度优先原则、优化措施哈尔滨工业大学工商管理硕士学位论文-9- 全面化原则、相关考核指标量化原则,同时在本章还介绍了解决方案的大概的优化思路,然后着重介绍了相应的解决方案,即:完善基础物业服务体系建设、完善投诉处理和响应机制、加强对外部保洁公司的管理、积极开拓增值服务项目以及完善项目停车管理。 通过一定的保障措施,来确保优化方案能够顺利的贯彻执行。 结论部分主要负责对本论文进行总结,通过整理之前章节的内容,最后得出相应的结论。 1.4.2论文主要研究方法本论文计划运用相关的研究方法来研究此课题。",
"就复议程序而言,以往司法实践中是由原合议庭进行书面审查,对此笔者认为,为确保复议的客观中立性,可规定被申请人有两次复议机会,一次向原合议庭,对原合议庭的复议结果不服的可在规定期限内向上级法院申请二次复议。 就赔偿制度而言,笔者认为,如若错误适用了诉前禁令制度,除该错误由审裁机构或其工作人员过失导致以外,申请人应当对被申请人因此遭受的损失进行赔偿。 诉前禁令制度适用错误一经认定,被申请人即可在规定期限内向原管辖法院提起侵权赔偿之诉,法院应另行组成合议庭进行审理。 三、有效适用利益衡量法由前文可知,当前的司法实践中,利益衡量法的适用存在评判理念偏失这一关键问题。 同时,在适用利益衡量法时缺乏有效方法论的指引,以至于法院在审裁互联网不正当竞争案件时未能均衡保护多方利益、平衡各方利益冲突,因此有效运用利益衡量法成为了当务之急。 对于上述问题,学界和实务界经过研究指出,比例原则作为利益冲突的解决方法,具有普遍适用性,对涉案行为进行正当性判定时可以引入比例原则。",
"程序保障Abstract IIIAbstract The problem of dealing withpart claims in civil litigation is essentially how to deal with the remaining claims after the first prosecution in the case of the plaintiff splitting his substantive claimsandfiling the split claims in several times.The research on the processing mechanism of some requests in civil law countries such as GermanyandJapanandTaiwan has been relatively mature,andits academic circles have formed views such as affirmative theory,negative theoryandrestrictive theory.Among them,the affirmation holds that it is based on the legal relationship of civil entities The high degree of autonomy of(the principle of autonomy of private law)should allow the parties to freely dispose of litigation rightsanddivide litigation claims; while the negation theory is based on the scope of res judicataandthe principle of good faith,andholds that there is no legal basis for the parties to divide litigation claimsandsue;",
"2、在项目开展中遇到过哪些困难?您是怎么解决的?3、您认为社区哪些纠纷比较突出(列举三个)?这些纠纷有何特点?4、您认为社工在整个项目运作过程中发挥了哪些作用?5、您认为社工在纠纷化解过程中扮演了哪些角色?6、在纠纷化解过程中您认为有哪些因素影响了纠纷的解决?7、请您谈谈社工作为调解员和其他行业的调解员最大的不同?8、截止目前,您认为项目是否达到预期成效并举例说明?58(三)纠纷当事人访谈提纲1、您认为在纠纷化解方面,社区在事前预防(通过开展讲座活动等提高居民法治意识)和事后调解中,更注重哪方面的工作?2、请您谈谈在处理您所遇到的纠纷时,社工作为调解员是否有发挥作用?3、请结合实际谈谈社工作为调解员和其他调解员最大的不同; 4、您认为社工在调解过程中扮演了哪些角色; 四)调解志愿者访谈提纲1、被访人基本信息姓名; 加入调解志愿者年限; 居住小区年限; 职业2、请您谈谈加入调解志愿者后您最大的收获?可举例;",
]
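(As an aside, purely to make the pro-rata vs. user-centric passages in the samples above concrete: below is a minimal Python sketch of the two allocation schemes they describe. It is not part of the dataset or of the retrieval code; the dictionary streams, the variable revenue_per_user, and all numbers are made-up toy values mirroring the artist-1/artist-2 example quoted in the first sample.)
# Toy illustration of the two payment schemes described in the sample passages.
# Hypothetical data: user_1 streams artist_1 10 times; user_2 streams artist_2 1000 times.
streams = {
    "user_1": {"artist_1": 10},
    "user_2": {"artist_2": 1000},
}
revenue_per_user = 10.0  # assume each subscriber contributes the same revenue

# Pro-rata: pool all revenue and split it proportionally to total stream counts.
total_revenue = revenue_per_user * len(streams)
total_streams = sum(sum(s.values()) for s in streams.values())
pro_rata = {}
for user_streams in streams.values():
    for artist, n in user_streams.items():
        pro_rata[artist] = pro_rata.get(artist, 0.0) + total_revenue * n / total_streams

# User-centric: split each user's own revenue among the artists that user streamed.
user_centric = {}
for user_streams in streams.values():
    user_total = sum(user_streams.values())
    for artist, n in user_streams.items():
        user_centric[artist] = user_centric.get(artist, 0.0) + revenue_per_user * n / user_total

print(pro_rata)      # {'artist_1': ~0.20, 'artist_2': ~19.80}: artist_2 gets almost everything
print(user_centric)  # {'artist_1': 10.0, 'artist_2': 10.0}: both artists get the same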