string | func
string | target
list | cwe
string | project
string | commit_id
string | hash
int64 | size
string | message
int64 | vul
---|---|---|---|---|---|---|---|---|
/*
  Validate and normalize SYSTEM VERSIONING state for a CREATE/ALTER TABLE.

  @param thd           current session (passed through to fix_implicit())
  @param alter_info    requested column list and ALTER flags
  @param create_table  table being created (used only for error reporting)

  @return false on success, true on error (error already reported via
          my_error()).

  NOTE(review): THD, Alter_info and TABLE_LIST are project types; the
  comments below describe only what this body visibly does.
*/
bool Table_scope_and_contents_source_st::vers_fix_system_fields(
THD *thd, Alter_info *alter_info, const TABLE_LIST &create_table)
{
/* Caller must not be dropping versioning when this runs. */
DBUG_ASSERT(!(alter_info->flags & ALTER_DROP_SYSTEM_VERSIONING));
/* Debug-only hook: force versioning onto non-temporary tables. */
DBUG_EXECUTE_IF("sysvers_force", if (!tmp_table()) {
alter_info->flags|= ALTER_ADD_SYSTEM_VERSIONING;
options|= HA_VERSIONED_TABLE; });
/* Fast exit when no versioning-related checking is required. */
if (!vers_info.need_check(alter_info))
return false;
const bool add_versioning= alter_info->flags & ALTER_ADD_SYSTEM_VERSIONING;
/*
  Only WITHOUT SYSTEM VERSIONING columns, no versioned ones, and the
  statement does not add versioning: the table is simply unversioned.
*/
if (!vers_info.versioned_fields && vers_info.unversioned_fields && !add_versioning)
{
// All is correct but this table is not versioned.
options&= ~HA_VERSIONED_TABLE;
return false;
}
/*
  Versioning metadata present but neither an ADD SYSTEM VERSIONING
  request nor any versioned field: reject the statement.
*/
if (!add_versioning && vers_info && !vers_info.versioned_fields)
{
my_error(ER_MISSING, MYF(0), create_table.table_name.str,
"WITH SYSTEM VERSIONING");
return true;
}
/*
  Mark every non-system column that was not explicitly declared as
  versioned (or was declared WITHOUT VERSIONING) so updates to it do
  not create history rows.
*/
List_iterator<Create_field> it(alter_info->create_list);
while (Create_field *f= it++)
{
if (f->vers_sys_field())
continue;
if ((f->versioning == Column_definition::VERSIONING_NOT_SET && !add_versioning) ||
f->versioning == Column_definition::WITHOUT_VERSIONING)
{
f->flags|= VERS_UPDATE_UNVERSIONED_FLAG;
}
} // while (Create_field *f= it++)
/* Add the implicit row_start/row_end machinery; true means error. */
if (vers_info.fix_implicit(thd, alter_info))
return true;
return false;
}
|
Safe
|
[
"CWE-416"
] |
server
|
af810407f78b7f792a9bb8c47c8c532eb3b3a758
|
2.443413286538605e+37
| 45 |
MDEV-28098 incorrect key in "dup value" error after long unique
reset errkey after using it, so that it wouldn't affect
the next error message in the next statement
| 0 |
/*
 * Compute the MMU role for a shadowed NPT root page.
 *
 * Starts from the common shadow-root role (with execonly/base flag
 * argument false) and then forces an indirect (non-direct) mapping at
 * the host's TDP paging level.
 *
 * NOTE(review): the return-type line (presumably "union kvm_mmu_role")
 * is not visible in this excerpt — confirm against the original file.
 */
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
return role;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
9f46c187e2e680ecd9de7983e4d081c3391acc76
|
1.2075395280226632e+38
| 11 |
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <kangel@zju.edu.cn>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
| 0 |
/**
 * React to a network-change notification.
 *
 * Bursts of change notifications are coalesced: the broadcast is only
 * scheduled via m_combineBroadcastsTimer when the timer is not already
 * pending, otherwise the duplicate request is logged and dropped.
 */
void LanLinkProvider::onNetworkChange()
{
    if (!m_combineBroadcastsTimer.isActive()) {
        m_combineBroadcastsTimer.start();
        return;
    }
    qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts";
}
|
Safe
|
[
"CWE-400",
"CWE-703"
] |
kdeconnect-kde
|
4fbd01a3d44a0bcca888c49a77ec7cfd10e113d7
|
3.0708743052707767e+38
| 8 |
Limit identity packets to 8KiB
Healthy identity packages shouldn't be that big and we don't want to
allow systems around us to send us ever humongous packages that will
just leave us without any memory.
Thanks Matthias Gerstner <mgerstner@suse.de> for reporting this.
| 0 |
/*
 * Default (capability) security hook for shmat(): always permits the
 * attach.  All parameters are ignored; returns 0 (success).
 */
static int cap_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr,
int shmflg)
{
return 0;
}
|
Safe
|
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
|
1.4715862174734697e+38
| 5 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: James Morris <jmorris@namei.org>
| 0 |
/*
 * Field-by-field equality comparison of two CollateExpr nodes.
 *
 * NOTE(review): the COMPARE_* macros are the standard equalfuncs.c
 * helpers; presumably each one returns false from this function on the
 * first mismatching field — confirm against equalfuncs.c.  Reaching the
 * end means all compared fields matched.
 */
_equalCollateExpr(const CollateExpr *a, const CollateExpr *b)
{
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(collOid);
COMPARE_LOCATION_FIELD(location);
return true;
}
|
Safe
|
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
|
8.80059502230707e+37
| 8 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
| 0 |
/*
 * set_stdfds: redirect stdin, stdout and stderr to the given file
 * descriptor.
 *
 * @fd: descriptor to duplicate onto the three standard descriptors.
 *
 * Returns 0 on success, -1 when fd is negative or any dup2() fails.
 * As in the original, a mid-loop dup2() failure leaves the standard
 * descriptors partially redirected.
 *
 * The three copy-pasted dup2 blocks are collapsed into one loop over
 * the target descriptors; behavior and error order are unchanged.
 */
int set_stdfds(int fd)
{
	int std_fds[] = { STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO };

	if (fd < 0)
		return -1;

	/* Duplicate fd onto each standard descriptor in turn. */
	for (size_t i = 0; i < sizeof(std_fds) / sizeof(std_fds[0]); i++) {
		if (dup2(fd, std_fds[i]) < 0)
			return -1;
	}

	return 0;
}
|
Safe
|
[
"CWE-417"
] |
lxc
|
5eb45428b312e978fb9e294dde16efb14dd9fa4d
|
1.5768272257188237e+38
| 21 |
CVE 2018-6556: verify netns fd in lxc-user-nic
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
| 0 |
/*
 * Parse an RSA public key blob from the server.
 *
 * Reads: magic, modulus length, 8 unknown bytes, exponent, modulus and
 * padding from stream s.  On success stores the modulus length in the
 * global g_server_public_key_len and returns the result of s_check(s)
 * (stream bounds still valid); returns False on bad magic or bad size.
 *
 * NOTE(review): modulus_len is decremented by SEC_PADDING_SIZE before
 * the range check; if the wire value were smaller than the padding the
 * unsigned subtraction wraps, but the (> SEC_MAX_MODULUS_SIZE) test
 * presumably rejects that wrapped value — confirm the constants.
 */
sec_parse_public_key(STREAM s, uint8 * modulus, uint8 * exponent)
{
uint32 magic, modulus_len;
in_uint32_le(s, magic);
if (magic != SEC_RSA_MAGIC)
{
error("RSA magic 0x%x\n", magic);
return False;
}
in_uint32_le(s, modulus_len);
/* Wire length includes the trailing padding; strip it. */
modulus_len -= SEC_PADDING_SIZE;
if ((modulus_len < SEC_MODULUS_SIZE) || (modulus_len > SEC_MAX_MODULUS_SIZE))
{
error("Bad server public key size (%u bits)\n", modulus_len * 8);
return False;
}
in_uint8s(s, 8); /* modulus_bits, unknown */
in_uint8a(s, exponent, SEC_EXPONENT_SIZE);
in_uint8a(s, modulus, modulus_len);
in_uint8s(s, SEC_PADDING_SIZE);
g_server_public_key_len = modulus_len;
/* s_check() verifies we did not read past the stream's end. */
return s_check(s);
}
|
Safe
|
[
"CWE-787"
] |
rdesktop
|
766ebcf6f23ccfe8323ac10242ae6e127d4505d2
|
2.209766063689372e+38
| 27 |
Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
| 0 |
/*
 * DebugFileOpen
 *
 * If a debug output file name (OutputFileName) was configured, redirect
 * stderr — and, when the file is a tty and we run under the postmaster,
 * also stdout — to that file.  Any failure is reported via
 * ereport(FATAL).  Does nothing when OutputFileName is empty.
 */
DebugFileOpen(void)
{
int fd,
istty;
if (OutputFileName[0])
{
/*
* A debug-output file name was given.
*
* Make sure we can write the file, and find out if it's a tty.
*/
if ((fd = open(OutputFileName, O_CREAT | O_APPEND | O_WRONLY,
0666)) < 0)
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", OutputFileName)));
istty = isatty(fd);
close(fd);
/*
* Redirect our stderr to the debug output file.
*/
if (!freopen(OutputFileName, "a", stderr))
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not reopen file \"%s\" as stderr: %m",
OutputFileName)));
/*
* If the file is a tty and we're running under the postmaster, try to
* send stdout there as well (if it isn't a tty then stderr will block
* out stdout, so we may as well let stdout go wherever it was going
* before).
*/
if (istty && IsUnderPostmaster)
if (!freopen(OutputFileName, "a", stdout))
ereport(FATAL,
(errcode_for_file_access(),
errmsg("could not reopen file \"%s\" as stdout: %m",
OutputFileName)));
}
}
|
Safe
|
[
"CWE-89"
] |
postgres
|
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
|
2.8997703634829305e+37
| 43 |
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
| 0 |
/*
 * Compute an alignment mask for the memory described by an iov_iter:
 * the OR of the addresses/offsets and lengths involved, so callers can
 * test it against an alignment requirement.  Dispatches on the iterator
 * flavor; returns 0 for flavors not handled below.
 */
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_alignment_iovec(i);
if (iov_iter_is_bvec(i))
return iov_iter_alignment_bvec(i);
if (iov_iter_is_pipe(i)) {
/* ring_size is presumably a power of two, making this a mask —
 * TODO confirm against pipe_inode_info. */
unsigned int p_mask = i->pipe->ring_size - 1;
size_t size = i->count;
/* Only a partially-used head buffer contributes iov_offset. */
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
}
if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
return 0;
}
|
Safe
|
[
"CWE-665",
"CWE-284"
] |
linux
|
9d2231c5d74e13b2a0546fee6737ee4446017903
|
1.7695241746366264e+38
| 23 |
lib/iov_iter: initialize "flags" in new pipe_buffer
The functions copy_page_to_iter_pipe() and push_pipe() can both
allocate a new pipe_buffer, but the "flags" member initializer is
missing.
Fixes: 241699cd72a8 ("new iov_iter flavour: pipe-backed")
To: Alexander Viro <viro@zeniv.linux.org.uk>
To: linux-fsdevel@vger.kernel.org
To: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
| 0 |
/*
 * Convert ISO-8859-14 bytes to UTF-8 by delegating to the generic
 * ISO8859xToUTF8 helper with the ISO-8859-14 translation table.
 * Parameters and return value follow that helper's convention.
 */
static int ISO8859_14ToUTF8 (unsigned char* out, int *outlen,
const unsigned char* in, int *inlen) {
return ISO8859xToUTF8 (out, outlen, in, inlen, xmlunicodetable_ISO8859_14);
}
|
Safe
|
[
"CWE-189"
] |
libxml2
|
69f04562f75212bfcabecd190ea8b06ace28ece2
|
1.2068098450613881e+38
| 4 |
Fix an off by one error in encoding
this off by one error doesn't seems to reproduce on linux
but the error is real.
| 0 |
/*
 * IPv6 /proc reporting hook for retransmit-queue messages: thin wrapper
 * that calls rds6_conn_message_info() with want_send == 0 (the final
 * argument), i.e. the retransmit rather than the send queue.
 */
static void rds6_conn_message_info_retrans(struct socket *sock,
unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds6_conn_message_info(sock, len, iter, lens, 0);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
5f9562ebe710c307adc5f666bf1a2162ee7977c0
|
1.2692792391310435e+37
| 7 |
rds: memory leak in __rds_conn_create()
__rds_conn_create() did not release conn->c_path when loop_trans != 0 and
trans->t_prefer_loopback != 0 and is_outgoing == 0.
Fixes: aced3ce57cd3 ("RDS tcp loopback connection can hang")
Signed-off-by: Hangyu Hua <hbh25y@gmail.com>
Reviewed-by: Sharath Srinivasan <sharath.srinivasan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
//
// Look up the named attribute in the header's internal map and return
// the map's (const) iterator for it — end() when absent.
// NOTE(review): the return-type line is not visible in this excerpt.
//
Header::find (const char name[]) const
{
return _map.find (name);
}
|
Safe
|
[
"CWE-125"
] |
openexr
|
e79d2296496a50826a15c667bf92bdc5a05518b4
|
1.8086013388639575e+38
| 4 |
fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <peterh@wetafx.co.nz>
| 0 |
/*
 * Return the filename component of a path via POSIX basename().
 * NOTE: basename() may modify the supplied buffer and may return a
 * pointer to static storage overwritten by the next call — callers
 * must not rely on the result across calls or free/reuse `path`.
 */
const char *Sys_Basename( char *path )
{
return basename( path );
}
|
Safe
|
[
"CWE-59"
] |
ioq3
|
b5acc31a4da72cc3a4a6d88facb15b6214d745c6
|
1.8841876951049785e+38
| 4 |
CVE-2012-3345
| 0 |
receive(
struct recvbuf *rbufp
)
{
register struct peer *peer; /* peer structure pointer */
register struct pkt *pkt; /* receive packet pointer */
u_char hisversion; /* packet version */
u_char hisleap; /* packet leap indicator */
u_char hismode; /* packet mode */
u_char hisstratum; /* packet stratum */
u_short restrict_mask; /* restrict bits */
const char *hm_str; /* hismode string */
const char *am_str; /* association match string */
int kissCode = NOKISS; /* Kiss Code */
int has_mac; /* length of MAC field */
int authlen; /* offset of MAC field */
int is_authentic = 0; /* cryptosum ok */
int retcode = AM_NOMATCH; /* match code */
keyid_t skeyid = 0; /* key IDs */
u_int32 opcode = 0; /* extension field opcode */
sockaddr_u *dstadr_sin; /* active runway */
struct peer *peer2; /* aux peer structure pointer */
endpt *match_ep; /* newpeer() local address */
l_fp p_org; /* origin timestamp */
l_fp p_rec; /* receive timestamp */
l_fp p_xmt; /* transmit timestamp */
#ifdef AUTOKEY
char hostname[NTP_MAXSTRLEN + 1];
char *groupname = NULL;
struct autokey *ap; /* autokey structure pointer */
int rval; /* cookie snatcher */
keyid_t pkeyid = 0, tkeyid = 0; /* key IDs */
#endif /* AUTOKEY */
#ifdef HAVE_NTP_SIGND
static unsigned char zero_key[16];
#endif /* HAVE_NTP_SIGND */
/*
* Monitor the packet and get restrictions. Note that the packet
* length for control and private mode packets must be checked
* by the service routines. Some restrictions have to be handled
* later in order to generate a kiss-o'-death packet.
*/
/*
* Bogus port check is before anything, since it probably
* reveals a clogging attack.
*/
sys_received++;
if (0 == SRCPORT(&rbufp->recv_srcadr)) {
sys_badlength++;
return; /* bogus port */
}
restrict_mask = restrictions(&rbufp->recv_srcadr);
pkt = &rbufp->recv_pkt;
DPRINTF(2, ("receive: at %ld %s<-%s flags %x restrict %03x org %#010x.%08x xmt %#010x.%08x\n",
current_time, stoa(&rbufp->dstadr->sin),
stoa(&rbufp->recv_srcadr), rbufp->dstadr->flags,
restrict_mask, ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
hisversion = PKT_VERSION(pkt->li_vn_mode);
hisleap = PKT_LEAP(pkt->li_vn_mode);
hismode = (int)PKT_MODE(pkt->li_vn_mode);
hisstratum = PKT_TO_STRATUM(pkt->stratum);
if (restrict_mask & RES_IGNORE) {
sys_restricted++;
return; /* ignore everything */
}
if (hismode == MODE_PRIVATE) {
if (!ntp_mode7 || (restrict_mask & RES_NOQUERY)) {
sys_restricted++;
return; /* no query private */
}
process_private(rbufp, ((restrict_mask &
RES_NOMODIFY) == 0));
return;
}
if (hismode == MODE_CONTROL) {
if (restrict_mask & RES_NOQUERY) {
sys_restricted++;
return; /* no query control */
}
process_control(rbufp, restrict_mask);
return;
}
if (restrict_mask & RES_DONTSERVE) {
sys_restricted++;
return; /* no time serve */
}
/*
* This is for testing. If restricted drop ten percent of
* surviving packets.
*/
if (restrict_mask & RES_FLAKE) {
if ((double)ntp_random() / 0x7fffffff < .1) {
sys_restricted++;
return; /* no flakeway */
}
}
/*
* Version check must be after the query packets, since they
* intentionally use an early version.
*/
if (hisversion == NTP_VERSION) {
sys_newversion++; /* new version */
} else if ( !(restrict_mask & RES_VERSION)
&& hisversion >= NTP_OLDVERSION) {
sys_oldversion++; /* previous version */
} else {
sys_badlength++;
return; /* old version */
}
/*
* Figure out his mode and validate the packet. This has some
* legacy raunch that probably should be removed. In very early
* NTP versions mode 0 was equivalent to what later versions
* would interpret as client mode.
*/
if (hismode == MODE_UNSPEC) {
if (hisversion == NTP_OLDVERSION) {
hismode = MODE_CLIENT;
} else {
sys_badlength++;
return; /* invalid mode */
}
}
/*
* Parse the extension field if present. We figure out whether
* an extension field is present by measuring the MAC size. If
* the number of words following the packet header is 0, no MAC
* is present and the packet is not authenticated. If 1, the
* packet is a crypto-NAK; if 3, the packet is authenticated
* with DES; if 5, the packet is authenticated with MD5; if 6,
* the packet is authenticated with SHA. If 2 or * 4, the packet
* is a runt and discarded forthwith. If greater than 6, an
* extension field is present, so we subtract the length of the
* field and go around again.
*/
authlen = LEN_PKT_NOMAC;
has_mac = rbufp->recv_length - authlen;
while (has_mac > 0) {
u_int32 len;
#ifdef AUTOKEY
u_int32 hostlen;
struct exten *ep;
#endif /*AUTOKEY */
if (has_mac % 4 != 0 || has_mac < (int)MIN_MAC_LEN) {
sys_badlength++;
return; /* bad length */
}
if (has_mac <= (int)MAX_MAC_LEN) {
skeyid = ntohl(((u_int32 *)pkt)[authlen / 4]);
break;
} else {
opcode = ntohl(((u_int32 *)pkt)[authlen / 4]);
len = opcode & 0xffff;
if ( len % 4 != 0
|| len < 4
|| (int)len + authlen > rbufp->recv_length) {
sys_badlength++;
return; /* bad length */
}
#ifdef AUTOKEY
/*
* Extract calling group name for later. If
* sys_groupname is non-NULL, there must be
* a group name provided to elicit a response.
*/
if ( (opcode & 0x3fff0000) == CRYPTO_ASSOC
&& sys_groupname != NULL) {
ep = (struct exten *)&((u_int32 *)pkt)[authlen / 4];
hostlen = ntohl(ep->vallen);
if ( hostlen >= sizeof(hostname)
|| hostlen > len -
offsetof(struct exten, pkt)) {
sys_badlength++;
return; /* bad length */
}
memcpy(hostname, &ep->pkt, hostlen);
hostname[hostlen] = '\0';
groupname = strchr(hostname, '@');
if (groupname == NULL) {
sys_declined++;
return;
}
groupname++;
}
#endif /* AUTOKEY */
authlen += len;
has_mac -= len;
}
}
/*
* If has_mac is < 0 we had a malformed packet.
*/
if (has_mac < 0) {
sys_badlength++;
return; /* bad length */
}
/*
* If authentication required, a MAC must be present.
*/
if (restrict_mask & RES_DONTTRUST && has_mac == 0) {
sys_restricted++;
return; /* access denied */
}
/*
* Update the MRU list and finger the cloggers. It can be a
* little expensive, so turn it off for production use.
* RES_LIMITED and RES_KOD will be cleared in the returned
* restrict_mask unless one or both actions are warranted.
*/
restrict_mask = ntp_monitor(rbufp, restrict_mask);
if (restrict_mask & RES_LIMITED) {
sys_limitrejected++;
if ( !(restrict_mask & RES_KOD)
|| MODE_BROADCAST == hismode
|| MODE_SERVER == hismode) {
if (MODE_SERVER == hismode)
DPRINTF(1, ("Possibly self-induced rate limiting of MODE_SERVER from %s\n",
stoa(&rbufp->recv_srcadr)));
return; /* rate exceeded */
}
if (hismode == MODE_CLIENT)
fast_xmit(rbufp, MODE_SERVER, skeyid,
restrict_mask);
else
fast_xmit(rbufp, MODE_ACTIVE, skeyid,
restrict_mask);
return; /* rate exceeded */
}
restrict_mask &= ~RES_KOD;
/*
* We have tossed out as many buggy packets as possible early in
* the game to reduce the exposure to a clogging attack. Now we
* have to burn some cycles to find the association and
* authenticate the packet if required. Note that we burn only
* digest cycles, again to reduce exposure. There may be no
* matching association and that's okay.
*
* More on the autokey mambo. Normally the local interface is
* found when the association was mobilized with respect to a
* designated remote address. We assume packets arriving from
* the remote address arrive via this interface and the local
* address used to construct the autokey is the unicast address
* of the interface. However, if the sender is a broadcaster,
* the interface broadcast address is used instead.
* Notwithstanding this technobabble, if the sender is a
* multicaster, the broadcast address is null, so we use the
* unicast address anyway. Don't ask.
*/
peer = findpeer(rbufp, hismode, &retcode);
dstadr_sin = &rbufp->dstadr->sin;
NTOHL_FP(&pkt->org, &p_org);
NTOHL_FP(&pkt->rec, &p_rec);
NTOHL_FP(&pkt->xmt, &p_xmt);
hm_str = modetoa(hismode);
am_str = amtoa(retcode);
/*
* Authentication is conditioned by three switches:
*
* NOPEER (RES_NOPEER) do not mobilize an association unless
* authenticated
* NOTRUST (RES_DONTTRUST) do not allow access unless
* authenticated (implies NOPEER)
* enable (sys_authenticate) master NOPEER switch, by default
* on
*
* The NOPEER and NOTRUST can be specified on a per-client basis
* using the restrict command. The enable switch if on implies
* NOPEER for all clients. There are four outcomes:
*
* NONE The packet has no MAC.
* OK the packet has a MAC and authentication succeeds
* ERROR the packet has a MAC and authentication fails
* CRYPTO crypto-NAK. The MAC has four octets only.
*
* Note: The AUTH(x, y) macro is used to filter outcomes. If x
* is zero, acceptable outcomes of y are NONE and OK. If x is
* one, the only acceptable outcome of y is OK.
*/
if (has_mac == 0) {
restrict_mask &= ~RES_MSSNTP;
is_authentic = AUTH_NONE; /* not required */
DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s len %d org %#010x.%08x xmt %#010x.%08x NOMAC\n",
current_time, stoa(dstadr_sin),
stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
authlen,
ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
} else if (has_mac == 4) {
restrict_mask &= ~RES_MSSNTP;
is_authentic = AUTH_CRYPTO; /* crypto-NAK */
DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x MAC4\n",
current_time, stoa(dstadr_sin),
stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
skeyid, authlen + has_mac, is_authentic,
ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
#ifdef HAVE_NTP_SIGND
/*
* If the signature is 20 bytes long, the last 16 of
* which are zero, then this is a Microsoft client
* wanting AD-style authentication of the server's
* reply.
*
* This is described in Microsoft's WSPP docs, in MS-SNTP:
* http://msdn.microsoft.com/en-us/library/cc212930.aspx
*/
} else if ( has_mac == MAX_MD5_LEN
&& (restrict_mask & RES_MSSNTP)
&& (retcode == AM_FXMIT || retcode == AM_NEWPASS)
&& (memcmp(zero_key, (char *)pkt + authlen + 4,
MAX_MD5_LEN - 4) == 0)) {
is_authentic = AUTH_NONE;
#endif /* HAVE_NTP_SIGND */
} else {
restrict_mask &= ~RES_MSSNTP;
#ifdef AUTOKEY
/*
* For autokey modes, generate the session key
* and install in the key cache. Use the socket
* broadcast or unicast address as appropriate.
*/
if (crypto_flags && skeyid > NTP_MAXKEY) {
/*
* More on the autokey dance (AKD). A cookie is
* constructed from public and private values.
* For broadcast packets, the cookie is public
* (zero). For packets that match no
* association, the cookie is hashed from the
* addresses and private value. For server
* packets, the cookie was previously obtained
* from the server. For symmetric modes, the
* cookie was previously constructed using an
* agreement protocol; however, should PKI be
* unavailable, we construct a fake agreement as
* the EXOR of the peer and host cookies.
*
* hismode ephemeral persistent
* =======================================
* active 0 cookie#
* passive 0% cookie#
* client sys cookie 0%
* server 0% sys cookie
* broadcast 0 0
*
* # if unsync, 0
* % can't happen
*/
if (has_mac < (int)MAX_MD5_LEN) {
sys_badauth++;
return;
}
if (hismode == MODE_BROADCAST) {
/*
* For broadcaster, use the interface
* broadcast address when available;
* otherwise, use the unicast address
* found when the association was
* mobilized. However, if this is from
* the wildcard interface, game over.
*/
if ( crypto_flags
&& rbufp->dstadr ==
ANY_INTERFACE_CHOOSE(&rbufp->recv_srcadr)) {
sys_restricted++;
return; /* no wildcard */
}
pkeyid = 0;
if (!SOCK_UNSPEC(&rbufp->dstadr->bcast))
dstadr_sin =
&rbufp->dstadr->bcast;
} else if (peer == NULL) {
pkeyid = session_key(
&rbufp->recv_srcadr, dstadr_sin, 0,
sys_private, 0);
} else {
pkeyid = peer->pcookie;
}
/*
* The session key includes both the public
* values and cookie. In case of an extension
* field, the cookie used for authentication
* purposes is zero. Note the hash is saved for
* use later in the autokey mambo.
*/
if (authlen > (int)LEN_PKT_NOMAC && pkeyid != 0) {
session_key(&rbufp->recv_srcadr,
dstadr_sin, skeyid, 0, 2);
tkeyid = session_key(
&rbufp->recv_srcadr, dstadr_sin,
skeyid, pkeyid, 0);
} else {
tkeyid = session_key(
&rbufp->recv_srcadr, dstadr_sin,
skeyid, pkeyid, 2);
}
}
#endif /* AUTOKEY */
/*
* Compute the cryptosum. Note a clogging attack may
* succeed in bloating the key cache. If an autokey,
* purge it immediately, since we won't be needing it
* again. If the packet is authentic, it can mobilize an
* association. Note that there is no key zero.
*/
if (!authdecrypt(skeyid, (u_int32 *)pkt, authlen,
has_mac))
is_authentic = AUTH_ERROR;
else
is_authentic = AUTH_OK;
#ifdef AUTOKEY
if (crypto_flags && skeyid > NTP_MAXKEY)
authtrust(skeyid, 0);
#endif /* AUTOKEY */
DPRINTF(2, ("receive: at %ld %s<-%s mode %d/%s:%s keyid %08x len %d auth %d org %#010x.%08x xmt %#010x.%08x\n",
current_time, stoa(dstadr_sin),
stoa(&rbufp->recv_srcadr), hismode, hm_str, am_str,
skeyid, authlen + has_mac, is_authentic,
ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf)));
}
/*
* The association matching rules are implemented by a set of
* routines and an association table. A packet matching an
* association is processed by the peer process for that
* association. If there are no errors, an ephemeral association
* is mobilized: a broadcast packet mobilizes a broadcast client
* aassociation; a manycast server packet mobilizes a manycast
* client association; a symmetric active packet mobilizes a
* symmetric passive association.
*/
switch (retcode) {
/*
* This is a client mode packet not matching any association. If
* an ordinary client, simply toss a server mode packet back
* over the fence. If a manycast client, we have to work a
* little harder.
*/
case AM_FXMIT:
/*
* If authentication OK, send a server reply; otherwise,
* send a crypto-NAK.
*/
if (!(rbufp->dstadr->flags & INT_MCASTOPEN)) {
if (AUTH(restrict_mask & RES_DONTTRUST,
is_authentic)) {
fast_xmit(rbufp, MODE_SERVER, skeyid,
restrict_mask);
} else if (is_authentic == AUTH_ERROR) {
fast_xmit(rbufp, MODE_SERVER, 0,
restrict_mask);
sys_badauth++;
} else {
sys_restricted++;
}
return; /* hooray */
}
/*
* This must be manycast. Do not respond if not
* configured as a manycast server.
*/
if (!sys_manycastserver) {
sys_restricted++;
return; /* not enabled */
}
#ifdef AUTOKEY
/*
* Do not respond if not the same group.
*/
if (group_test(groupname, NULL)) {
sys_declined++;
return;
}
#endif /* AUTOKEY */
/*
* Do not respond if we are not synchronized or our
* stratum is greater than the manycaster or the
* manycaster has already synchronized to us.
*/
if ( sys_leap == LEAP_NOTINSYNC
|| sys_stratum >= hisstratum
|| (!sys_cohort && sys_stratum == hisstratum + 1)
|| rbufp->dstadr->addr_refid == pkt->refid) {
sys_declined++;
return; /* no help */
}
/*
* Respond only if authentication succeeds. Don't do a
* crypto-NAK, as that would not be useful.
*/
if (AUTH(restrict_mask & RES_DONTTRUST, is_authentic))
fast_xmit(rbufp, MODE_SERVER, skeyid,
restrict_mask);
return; /* hooray */
/*
* This is a server mode packet returned in response to a client
* mode packet sent to a multicast group address (for
* manycastclient) or to a unicast address (for pool). The
* origin timestamp is a good nonce to reliably associate the
* reply with what was sent. If there is no match, that's
* curious and could be an intruder attempting to clog, so we
* just ignore it.
*
* If the packet is authentic and the manycastclient or pool
* association is found, we mobilize a client association and
* copy pertinent variables from the manycastclient or pool
* association to the new client association. If not, just
* ignore the packet.
*
* There is an implosion hazard at the manycast client, since
* the manycast servers send the server packet immediately. If
* the guy is already here, don't fire up a duplicate.
*/
case AM_MANYCAST:
#ifdef AUTOKEY
/*
* Do not respond if not the same group.
*/
if (group_test(groupname, NULL)) {
sys_declined++;
return;
}
#endif /* AUTOKEY */
if ((peer2 = findmanycastpeer(rbufp)) == NULL) {
sys_restricted++;
return; /* not enabled */
}
if (!AUTH( (!(peer2->cast_flags & MDF_POOL)
&& sys_authenticate)
|| (restrict_mask & (RES_NOPEER |
RES_DONTTRUST)), is_authentic)) {
sys_restricted++;
return; /* access denied */
}
/*
* Do not respond if unsynchronized or stratum is below
* the floor or at or above the ceiling.
*/
if ( hisleap == LEAP_NOTINSYNC
|| hisstratum < sys_floor
|| hisstratum >= sys_ceiling) {
sys_declined++;
return; /* no help */
}
peer = newpeer(&rbufp->recv_srcadr, NULL, rbufp->dstadr,
MODE_CLIENT, hisversion, peer2->minpoll,
peer2->maxpoll, FLAG_PREEMPT |
(FLAG_IBURST & peer2->flags), MDF_UCAST |
MDF_UCLNT, 0, skeyid, sys_ident);
if (NULL == peer) {
sys_declined++;
return; /* ignore duplicate */
}
/*
* After each ephemeral pool association is spun,
* accelerate the next poll for the pool solicitor so
* the pool will fill promptly.
*/
if (peer2->cast_flags & MDF_POOL)
peer2->nextdate = current_time + 1;
/*
* Further processing of the solicitation response would
* simply detect its origin timestamp as bogus for the
* brand-new association (it matches the prototype
* association) and tinker with peer->nextdate delaying
* first sync.
*/
return; /* solicitation response handled */
/*
* This is the first packet received from a broadcast server. If
* the packet is authentic and we are enabled as broadcast
* client, mobilize a broadcast client association. We don't
* kiss any frogs here.
*/
case AM_NEWBCL:
#ifdef AUTOKEY
/*
* Do not respond if not the same group.
*/
if (group_test(groupname, sys_ident)) {
sys_declined++;
return;
}
#endif /* AUTOKEY */
if (sys_bclient == 0) {
sys_restricted++;
return; /* not enabled */
}
if (!AUTH(sys_authenticate | (restrict_mask &
(RES_NOPEER | RES_DONTTRUST)), is_authentic)) {
sys_restricted++;
return; /* access denied */
}
/*
* Do not respond if unsynchronized or stratum is below
* the floor or at or above the ceiling.
*/
if ( hisleap == LEAP_NOTINSYNC
|| hisstratum < sys_floor
|| hisstratum >= sys_ceiling) {
sys_declined++;
return; /* no help */
}
#ifdef AUTOKEY
/*
* Do not respond if Autokey and the opcode is not a
* CRYPTO_ASSOC response with association ID.
*/
if ( crypto_flags && skeyid > NTP_MAXKEY
&& (opcode & 0xffff0000) != (CRYPTO_ASSOC | CRYPTO_RESP)) {
sys_declined++;
return; /* protocol error */
}
#endif /* AUTOKEY */
/*
* Broadcasts received via a multicast address may
* arrive after a unicast volley has begun
* with the same remote address. newpeer() will not
* find duplicate associations on other local endpoints
* if a non-NULL endpoint is supplied. multicastclient
* ephemeral associations are unique across all local
* endpoints.
*/
if (!(INT_MCASTOPEN & rbufp->dstadr->flags))
match_ep = rbufp->dstadr;
else
match_ep = NULL;
/*
* Determine whether to execute the initial volley.
*/
if (sys_bdelay != 0) {
#ifdef AUTOKEY
/*
* If a two-way exchange is not possible,
* neither is Autokey.
*/
if (crypto_flags && skeyid > NTP_MAXKEY) {
sys_restricted++;
return; /* no autokey */
}
#endif /* AUTOKEY */
/*
* Do not execute the volley. Start out in
* broadcast client mode.
*/
peer = newpeer(&rbufp->recv_srcadr, NULL,
match_ep, MODE_BCLIENT, hisversion,
pkt->ppoll, pkt->ppoll, FLAG_PREEMPT,
MDF_BCLNT, 0, skeyid, sys_ident);
if (NULL == peer) {
sys_restricted++;
return; /* ignore duplicate */
} else {
peer->delay = sys_bdelay;
peer->bxmt = p_xmt;
}
break;
}
/*
* Execute the initial volley in order to calibrate the
* propagation delay and run the Autokey protocol.
*
* Note that the minpoll is taken from the broadcast
* packet, normally 6 (64 s) and that the poll interval
* is fixed at this value.
*/
peer = newpeer(&rbufp->recv_srcadr, NULL, match_ep,
MODE_CLIENT, hisversion, pkt->ppoll, pkt->ppoll,
FLAG_BC_VOL | FLAG_IBURST | FLAG_PREEMPT, MDF_BCLNT,
0, skeyid, sys_ident);
if (NULL == peer) {
sys_restricted++;
return; /* ignore duplicate */
}
peer->bxmt = p_xmt;
#ifdef AUTOKEY
if (skeyid > NTP_MAXKEY)
crypto_recv(peer, rbufp);
#endif /* AUTOKEY */
return; /* hooray */
/*
* This is the first packet received from a symmetric active
* peer. If the packet is authentic and the first he sent,
* mobilize a passive association. If not, kiss the frog.
*/
case AM_NEWPASS:
#ifdef AUTOKEY
/*
* Do not respond if not the same group.
*/
if (group_test(groupname, sys_ident)) {
sys_declined++;
return;
}
#endif /* AUTOKEY */
if (!AUTH(sys_authenticate | (restrict_mask &
(RES_NOPEER | RES_DONTTRUST)), is_authentic)) {
/*
* If authenticated but cannot mobilize an
* association, send a symmetric passive
* response without mobilizing an association.
* This is for drat broken Windows clients. See
* Microsoft KB 875424 for preferred workaround.
*/
if (AUTH(restrict_mask & RES_DONTTRUST,
is_authentic)) {
fast_xmit(rbufp, MODE_PASSIVE, skeyid,
restrict_mask);
return; /* hooray */
}
if (is_authentic == AUTH_ERROR) {
fast_xmit(rbufp, MODE_ACTIVE, 0,
restrict_mask);
sys_restricted++;
return;
}
/* [Bug 2941]
* If we got here, the packet isn't part of an
* existing association, it isn't correctly
* authenticated, and it didn't meet either of
* the previous two special cases so we should
* just drop it on the floor. For example,
* crypto-NAKs (is_authentic == AUTH_CRYPTO)
* will make it this far. This is just
* debug-printed and not logged to avoid log
* flooding.
*/
DPRINTF(2, ("receive: at %ld refusing to mobilize passive association"
" with unknown peer %s mode %d/%s:%s keyid %08x len %d auth %d\n",
current_time, stoa(&rbufp->recv_srcadr),
hismode, hm_str, am_str, skeyid,
(authlen + has_mac), is_authentic));
sys_declined++;
return;
}
/*
* Do not respond if synchronized and if stratum is
* below the floor or at or above the ceiling. Note,
* this allows an unsynchronized peer to synchronize to
* us. It would be very strange if he did and then was
* nipped, but that could only happen if we were
* operating at the top end of the range. It also means
* we will spin an ephemeral association in response to
* MODE_ACTIVE KoDs, which will time out eventually.
*/
if ( hisleap != LEAP_NOTINSYNC
&& (hisstratum < sys_floor || hisstratum >= sys_ceiling)) {
sys_declined++;
return; /* no help */
}
/*
* The message is correctly authenticated and allowed.
* Mobilize a symmetric passive association.
*/
if ((peer = newpeer(&rbufp->recv_srcadr, NULL,
rbufp->dstadr, MODE_PASSIVE, hisversion, pkt->ppoll,
NTP_MAXDPOLL, 0, MDF_UCAST, 0, skeyid,
sys_ident)) == NULL) {
sys_declined++;
return; /* ignore duplicate */
}
break;
/*
* Process regular packet. Nothing special.
*/
case AM_PROCPKT:
#ifdef AUTOKEY
/*
* Do not respond if not the same group.
*/
if (group_test(groupname, peer->ident)) {
sys_declined++;
return;
}
#endif /* AUTOKEY */
if (MODE_BROADCAST == hismode) {
u_char poll;
int bail = 0;
l_fp tdiff;
DPRINTF(2, ("receive: PROCPKT/BROADCAST: prev pkt %ld seconds ago, ppoll: %d, %d secs\n",
(current_time - peer->timelastrec),
peer->ppoll, (1 << peer->ppoll)
));
/* Things we can check:
*
* Did the poll interval change?
* Is the poll interval in the packet in-range?
* Did this packet arrive too soon?
* Is the timestamp in this packet monotonic
* with respect to the previous packet?
*/
/* This is noteworthy, not error-worthy */
if (pkt->ppoll != peer->ppoll) {
msyslog(LOG_INFO, "receive: broadcast poll from %s changed from %ud to %ud",
stoa(&rbufp->recv_srcadr),
peer->ppoll, pkt->ppoll);
}
poll = min(peer->maxpoll,
max(peer->minpoll, pkt->ppoll));
/* This is error-worthy */
if (pkt->ppoll != poll) {
msyslog(LOG_INFO, "receive: broadcast poll of %ud from %s is out-of-range (%d to %d)!",
pkt->ppoll, stoa(&rbufp->recv_srcadr),
peer->minpoll, peer->maxpoll);
++bail;
}
if ( (current_time - peer->timelastrec)
< (1 << pkt->ppoll)) {
msyslog(LOG_INFO, "receive: broadcast packet from %s arrived after %ld, not %d seconds!",
stoa(&rbufp->recv_srcadr),
(current_time - peer->timelastrec),
(1 << pkt->ppoll)
);
++bail;
}
tdiff = p_xmt;
L_SUB(&tdiff, &peer->bxmt);
if (tdiff.l_i < 0) {
msyslog(LOG_INFO, "receive: broadcast packet from %s contains non-monotonic timestamp: %#010x.%08x -> %#010x.%08x",
stoa(&rbufp->recv_srcadr),
peer->bxmt.l_ui, peer->bxmt.l_uf,
p_xmt.l_ui, p_xmt.l_uf
);
++bail;
}
peer->bxmt = p_xmt;
if (bail) {
peer->timelastrec = current_time;
sys_declined++;
return;
}
}
break;
/*
* A passive packet matches a passive association. This is
* usually the result of reconfiguring a client on the fly. As
* this association might be legitimate and this packet an
* attempt to deny service, just ignore it.
*/
case AM_ERR:
sys_declined++;
return;
/*
* For everything else there is the bit bucket.
*/
default:
sys_declined++;
return;
}
#ifdef AUTOKEY
/*
* If the association is configured for Autokey, the packet must
* have a public key ID; if not, the packet must have a
* symmetric key ID.
*/
if ( is_authentic != AUTH_CRYPTO
&& ( ((peer->flags & FLAG_SKEY) && skeyid <= NTP_MAXKEY)
|| (!(peer->flags & FLAG_SKEY) && skeyid > NTP_MAXKEY))) {
sys_badauth++;
return;
}
#endif /* AUTOKEY */
peer->received++;
peer->flash &= ~PKT_TEST_MASK;
if (peer->flags & FLAG_XBOGUS) {
peer->flags &= ~FLAG_XBOGUS;
peer->flash |= TEST3;
}
/*
* Next comes a rigorous schedule of timestamp checking. If the
* transmit timestamp is zero, the server has not initialized in
* interleaved modes or is horribly broken.
*/
if (L_ISZERO(&p_xmt)) {
peer->flash |= TEST3; /* unsynch */
/*
* If the transmit timestamp duplicates a previous one, the
* packet is a replay. This prevents the bad guys from replaying
* the most recent packet, authenticated or not.
*/
} else if (L_ISEQU(&peer->xmt, &p_xmt)) {
peer->flash |= TEST1; /* duplicate */
peer->oldpkt++;
return;
/*
* If this is a broadcast mode packet, skip further checking. If
* an initial volley, bail out now and let the client do its
* stuff. If the origin timestamp is nonzero, this is an
* interleaved broadcast. so restart the protocol.
*/
} else if (hismode == MODE_BROADCAST) {
if (!L_ISZERO(&p_org) && !(peer->flags & FLAG_XB)) {
peer->flags |= FLAG_XB;
peer->aorg = p_xmt;
peer->borg = rbufp->recv_time;
report_event(PEVNT_XLEAVE, peer, NULL);
return;
}
/*
* Basic mode checks:
*
* If there is no origin timestamp, it's an initial packet.
*
* Otherwise, check for bogus packet in basic mode.
* If it is bogus, switch to interleaved mode and resynchronize,
* but only after confirming the packet is not bogus in
* symmetric interleaved mode.
*
* This could also mean somebody is forging packets claiming to
* be from us, attempting to cause our server to KoD us.
*/
} else if (peer->flip == 0) {
if (0 < hisstratum && L_ISZERO(&p_org)) {
L_CLR(&peer->aorg);
} else if (!L_ISEQU(&p_org, &peer->aorg)) {
peer->bogusorg++;
peer->flash |= TEST2; /* bogus */
msyslog(LOG_INFO,
"receive: Unexpected origin timestamp %#010x.%08x from %s xmt %#010x.%08x",
ntohl(pkt->org.l_ui), ntohl(pkt->org.l_uf),
ntoa(&peer->srcadr),
ntohl(pkt->xmt.l_ui), ntohl(pkt->xmt.l_uf));
if ( !L_ISZERO(&peer->dst)
&& L_ISEQU(&p_org, &peer->dst)) {
/* Might be the start of an interleave */
peer->flip = 1;
report_event(PEVNT_XLEAVE, peer, NULL);
}
return; /* Bogus or possible interleave packet */
} else {
L_CLR(&peer->aorg);
}
/*
* Check for valid nonzero timestamp fields.
*/
} else if (L_ISZERO(&p_org) || L_ISZERO(&p_rec) ||
L_ISZERO(&peer->dst)) {
peer->flash |= TEST3; /* unsynch */
/*
* Check for bogus packet in interleaved symmetric mode. This
* can happen if a packet is lost, duplicated or crossed. If
* found, flip and resynchronize.
*/
} else if ( !L_ISZERO(&peer->dst)
&& !L_ISEQU(&p_org, &peer->dst)) {
peer->bogusorg++;
peer->flags |= FLAG_XBOGUS;
peer->flash |= TEST2; /* bogus */
return; /* Bogus packet, we are done */
}
/*
* If this is a crypto_NAK, the server cannot authenticate a
* client packet. The server might have just changed keys. Clear
* the association and restart the protocol.
*/
if (is_authentic == AUTH_CRYPTO) {
report_event(PEVNT_AUTH, peer, "crypto_NAK");
peer->flash |= TEST5; /* bad auth */
peer->badauth++;
if (peer->flags & FLAG_PREEMPT) {
unpeer(peer);
return;
}
#ifdef AUTOKEY
if (peer->crypto)
peer_clear(peer, "AUTH");
#endif /* AUTOKEY */
return;
/*
* If the digest fails or it's missing for authenticated
* associations, the client cannot authenticate a server
* reply to a client packet previously sent. The loopback check
* is designed to avoid a bait-and-switch attack, which was
* possible in past versions. If symmetric modes, return a
* crypto-NAK. The peer should restart the protocol.
*/
} else if (!AUTH(peer->keyid || has_mac ||
(restrict_mask & RES_DONTTRUST), is_authentic)) {
report_event(PEVNT_AUTH, peer, "digest");
peer->flash |= TEST5; /* bad auth */
peer->badauth++;
if ( has_mac
&& (hismode == MODE_ACTIVE || hismode == MODE_PASSIVE))
fast_xmit(rbufp, MODE_ACTIVE, 0, restrict_mask);
if (peer->flags & FLAG_PREEMPT) {
unpeer(peer);
return;
}
#ifdef AUTOKEY
if (peer->crypto)
peer_clear(peer, "AUTH");
#endif /* AUTOKEY */
return;
}
/*
* Update the state variables.
*/
if (peer->flip == 0) {
if (hismode != MODE_BROADCAST)
peer->rec = p_xmt;
peer->dst = rbufp->recv_time;
}
peer->xmt = p_xmt;
/*
* Set the peer ppoll to the maximum of the packet ppoll and the
* peer minpoll. If a kiss-o'-death, set the peer minpoll to
* this maximum and advance the headway to give the sender some
* headroom. Very intricate.
*/
/*
* Check for any kiss codes. Note this is only used when a server
* responds to a packet request
*/
kissCode = kiss_code_check(hisleap, hisstratum, hismode, pkt->refid);
/*
* Check to see if this is a RATE Kiss Code
* Currently this kiss code will accept whatever poll
* rate that the server sends
*/
peer->ppoll = max(peer->minpoll, pkt->ppoll);
if (kissCode == RATEKISS) {
peer->selbroken++; /* Increment the KoD count */
report_event(PEVNT_RATE, peer, NULL);
if (pkt->ppoll > peer->minpoll)
peer->minpoll = peer->ppoll;
peer->burst = peer->retry = 0;
peer->throttle = (NTP_SHIFT + 1) * (1 << peer->minpoll);
poll_update(peer, pkt->ppoll);
return; /* kiss-o'-death */
}
if (kissCode != NOKISS) {
peer->selbroken++; /* Increment the KoD count */
return; /* Drop any other kiss code packets */
}
/*
* That was hard and I am sweaty, but the packet is squeaky
* clean. Get on with real work.
*/
peer->timereceived = current_time;
peer->timelastrec = current_time;
if (is_authentic == AUTH_OK)
peer->flags |= FLAG_AUTHENTIC;
else
peer->flags &= ~FLAG_AUTHENTIC;
#ifdef AUTOKEY
/*
* More autokey dance. The rules of the cha-cha are as follows:
*
* 1. If there is no key or the key is not auto, do nothing.
*
* 2. If this packet is in response to the one just previously
* sent or from a broadcast server, do the extension fields.
* Otherwise, assume bogosity and bail out.
*
* 3. If an extension field contains a verified signature, it is
* self-authenticated and we sit the dance.
*
* 4. If this is a server reply, check only to see that the
* transmitted key ID matches the received key ID.
*
* 5. Check to see that one or more hashes of the current key ID
* matches the previous key ID or ultimate original key ID
* obtained from the broadcaster or symmetric peer. If no
* match, sit the dance and call for new autokey values.
*
* In case of crypto error, fire the orchestra, stop dancing and
* restart the protocol.
*/
if (peer->flags & FLAG_SKEY) {
/*
* Decrement remaining autokey hashes. This isn't
* perfect if a packet is lost, but results in no harm.
*/
ap = (struct autokey *)peer->recval.ptr;
if (ap != NULL) {
if (ap->seq > 0)
ap->seq--;
}
peer->flash |= TEST8;
rval = crypto_recv(peer, rbufp);
if (rval == XEVNT_OK) {
peer->unreach = 0;
} else {
if (rval == XEVNT_ERR) {
report_event(PEVNT_RESTART, peer,
"crypto error");
peer_clear(peer, "CRYP");
peer->flash |= TEST9; /* bad crypt */
if (peer->flags & FLAG_PREEMPT)
unpeer(peer);
}
return;
}
/*
* If server mode, verify the receive key ID matches
* the transmit key ID.
*/
if (hismode == MODE_SERVER) {
if (skeyid == peer->keyid)
peer->flash &= ~TEST8;
/*
* If an extension field is present, verify only that it
* has been correctly signed. We don't need a sequence
* check here, but the sequence continues.
*/
} else if (!(peer->flash & TEST8)) {
peer->pkeyid = skeyid;
/*
* Now the fun part. Here, skeyid is the current ID in
* the packet, pkeyid is the ID in the last packet and
* tkeyid is the hash of skeyid. If the autokey values
* have not been received, this is an automatic error.
* If so, check that the tkeyid matches pkeyid. If not,
* hash tkeyid and try again. If the number of hashes
* exceeds the number remaining in the sequence, declare
* a successful failure and refresh the autokey values.
*/
} else if (ap != NULL) {
int i;
for (i = 0; ; i++) {
if ( tkeyid == peer->pkeyid
|| tkeyid == ap->key) {
peer->flash &= ~TEST8;
peer->pkeyid = skeyid;
ap->seq -= i;
break;
}
if (i > ap->seq) {
peer->crypto &=
~CRYPTO_FLAG_AUTO;
break;
}
tkeyid = session_key(
&rbufp->recv_srcadr, dstadr_sin,
tkeyid, pkeyid, 0);
}
if (peer->flash & TEST8)
report_event(PEVNT_AUTH, peer, "keylist");
}
if (!(peer->crypto & CRYPTO_FLAG_PROV)) /* test 9 */
peer->flash |= TEST8; /* bad autokey */
/*
* The maximum lifetime of the protocol is about one
* week before restarting the Autokey protocol to
* refresh certificates and leapseconds values.
*/
if (current_time > peer->refresh) {
report_event(PEVNT_RESTART, peer,
"crypto refresh");
peer_clear(peer, "TIME");
return;
}
}
#endif /* AUTOKEY */
/*
* The dance is complete and the flash bits have been lit. Toss
* the packet over the fence for processing, which may light up
* more flashers.
*/
process_packet(peer, pkt, rbufp->recv_length);
/*
* In interleaved mode update the state variables. Also adjust the
* transmit phase to avoid crossover.
*/
if (peer->flip != 0) {
peer->rec = p_rec;
peer->dst = rbufp->recv_time;
if (peer->nextdate - current_time < (1U << min(peer->ppoll,
peer->hpoll)) / 2)
peer->nextdate++;
else
peer->nextdate--;
}
}
|
Safe
|
[
"CWE-254"
] |
ntp
|
50ef2f62dc326bc9edac166b2b4ba5b5d8b4f7d4
|
2.391291041240654e+38
| 1,260 |
[Sec 2935] use L_SUB instead of L_ISGT. Juergen Perlinger
| 0 |
/*
 * tfm_load_file:
 *   Load the metrics from TFM file `filename' into `info'.
 *   Returns 0 on success, -1 on I/O error or if the file is not a
 *   well-formed TFM file.
 *
 *   Hardened against malformed input: the header's word count `lf' is
 *   checked against the real file size before any table is read, the
 *   font-family name is truncated to the destination buffer, and
 *   per-character table indices are clamped to the table sizes.
 */
int	tfm_load_file(const char *filename, TFMInfo *info)
{
	int	lf, lh, bc, ec, nw, nh, nd, ne;
	int	i, n;
	Uchar	*tfm;
	Uchar	*ptr;
	struct stat st;
	int	size;
	FILE	*in;
	Int32	*cb;
	Int32	*charinfo;
	Int32	*widths;
	Int32	*heights;
	Int32	*depths;
	Uint32	checksum;

	in = fopen(filename, "rb");
	if(in == NULL)
		return -1;
	tfm = NULL;
	DEBUG((DBG_FONTS, "(mt) reading TFM file `%s'\n",
		filename));
	/* We read the entire TFM file into core */
	if(fstat(fileno(in), &st) < 0)
		goto error;	/* was `return -1', which leaked `in' */
	if(st.st_size == 0)
		goto bad_tfm;
	/* allocate a word-aligned buffer to hold the file */
	size = 4 * ROUND(st.st_size, 4);
	if(size != st.st_size)
		mdvi_warning(_("Warning: TFM file `%s' has suspicious size\n"),
			     filename);
	tfm = (Uchar *)mdvi_malloc(size);
	if(fread(tfm, st.st_size, 1, in) != 1)
		goto error;
	/* we don't need this anymore */
	fclose(in);
	in = NULL;
	/* not a checksum, but serves a similar purpose: it accumulates the
	 * word counts that the TFM format requires to add up to `lf' */
	checksum = 0;
	ptr = tfm;
	/* get the counters (each is a 16-bit big-endian word, so >= 0) */
	lf = muget2(ptr);
	lh = muget2(ptr); checksum += 6 + lh;
	bc = muget2(ptr);
	ec = muget2(ptr); checksum += ec - bc + 1;
	nw = muget2(ptr); checksum += nw;
	nh = muget2(ptr); checksum += nh;
	nd = muget2(ptr); checksum += nd;
	checksum += muget2(ptr); /* skip italics correction count */
	checksum += muget2(ptr); /* skip lig/kern table size */
	checksum += muget2(ptr); /* skip kern table size */
	ne = muget2(ptr); checksum += ne;
	checksum += muget2(ptr); /* skip # of font parameters */

	/* Every table access below stays within the first `lf' words, so a
	 * truncated file whose header claims more words than it contains
	 * would cause out-of-bounds reads.  Reject it up front. */
	if(lf < 6 || (long)lf * 4 > (long)st.st_size)
		goto bad_tfm;

	size = ec - bc + 1;
	/* validate the header before dereferencing any table pointer:
	 * the counters must add up to `lf', the char range must be sane,
	 * and the width/height/depth tables must be non-empty (the TFM
	 * format mandates a zero entry at index 0 of each) */
	if(checksum != lf || bc - 1 > ec || ec > 255 || ne > 256 ||
	   nw <= 0 || nh <= 0 || nd <= 0)
		goto bad_tfm;
	cb = (Int32 *)tfm; cb += 6 + lh;
	charinfo = cb;	cb += size;
	widths = cb;	cb += nw;
	heights = cb;	cb += nh;
	depths = cb;
	if(widths[0] || heights[0] || depths[0])
		goto bad_tfm;

	/* from this point on, only clamped indices are used */
	/* now we're at the header */
	/* get the checksum */
	info->checksum = muget4(ptr);
	/* get the design size */
	info->design = muget4(ptr);
	/* get the coding scheme */
	if(lh > 2) {
		/* get the coding scheme */
		i = n = msget1(ptr);
		if(n < 0 || n > 39) {
			mdvi_warning(_("%s: font coding scheme truncated to 40 bytes\n"),
				     filename);
			n = 39;
		}
		memcpy(info->coding, ptr, n);
		info->coding[n] = 0;
		ptr += i;
	} else
		strcpy(info->coding, "FontSpecific");
	/* get the font family */
	if(lh > 12) {
		n = msget1(ptr);
		if(n > 0) {
			/* clamp the copy to the destination buffer; the
			 * original used Max(n, 63), which let a long name
			 * overflow info->family */
			i = (n > 63) ? 63 : n;
			memcpy(info->family, ptr, i);
			info->family[i] = 0;
		} else
			strcpy(info->family, "unspecified");
		ptr += n;
	}
	/* now we don't read from `ptr' anymore */
	info->loc = bc;
	info->hic = ec;
	info->type = DviFontTFM;
	/* allocate characters */
	info->chars = xnalloc(TFMChar, size);
#ifdef WORD_LITTLE_ENDIAN
	/* byte-swap the three arrays at once (they are consecutive in memory) */
	swap_array((Uint32 *)widths, nw + nh + nd);
#endif
	/* get the relevant data: each char_info word holds a width index
	 * byte followed by packed height/depth nibbles */
	ptr = (Uchar *)charinfo;
	for(i = bc; i <= ec; ptr += 3, i++) {
		int	ndx;

		ndx = (int)*ptr; ptr++;
		if(ndx >= nw)
			ndx = 0;	/* corrupt index: fall back to the mandatory zero entry */
		info->chars[i-bc].advance = widths[ndx];
		/* TFM files lack this information */
		info->chars[i-bc].left = 0;
		info->chars[i-bc].right = widths[ndx];
		info->chars[i-bc].present = (ndx != 0);
		if(ndx) {
			ndx = ((*ptr >> 4) & 0xf);
			if(ndx >= nh)
				ndx = 0;
			info->chars[i-bc].height = heights[ndx];
			ndx = (*ptr & 0xf);
			if(ndx >= nd)
				ndx = 0;
			info->chars[i-bc].depth = depths[ndx];
		}
	}

	/* free everything */
	mdvi_free(tfm);

	return 0;

bad_tfm:
	mdvi_error(_("%s: File corrupted, or not a TFM file\n"), filename);
error:
	if(tfm) mdvi_free(tfm);
	if(in) fclose(in);
	return -1;
}
|
Vulnerable
|
[
"CWE-20"
] |
evince
|
d4139205b010ed06310d14284e63114e88ec6de2
|
2.996204272213269e+38
| 148 |
backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
| 1 |
UnicodeString::toUTF8(ByteSink &sink) const {
  const int32_t srcLength = length();
  if (srcLength == 0) {
    return;  // Empty string: nothing is appended to the sink.
  }
  // Ask the sink for a scratch buffer; it may hand back our stack buffer.
  char stackBuffer[1024];
  int32_t capacity = (int32_t)sizeof(stackBuffer);
  char *buffer = sink.GetAppendBuffer(
      srcLength < capacity ? srcLength : capacity,
      3 * srcLength,  // worst-case UTF-8 size for UTF-16 input
      stackBuffer, capacity, &capacity);
  UBool ownsBuffer = FALSE;
  int32_t dstLength = 0;
  UErrorCode status = U_ZERO_ERROR;
  // First conversion attempt; ill-formed units become U+FFFD.
  u_strToUTF8WithSub(buffer, capacity, &dstLength,
                     getBuffer(), srcLength,
                     0xFFFD,  // Standard substitution character.
                     NULL,    // Don't care about number of substitutions.
                     &status);
  if (status == U_BUFFER_OVERFLOW_ERROR) {
    // The first pass measured the exact size needed; retry with a
    // heap buffer of precisely that size.
    buffer = (char *)uprv_malloc(dstLength);
    if (buffer == NULL) {
      status = U_MEMORY_ALLOCATION_ERROR;
    } else {
      ownsBuffer = TRUE;
      status = U_ZERO_ERROR;
      u_strToUTF8WithSub(buffer, dstLength, &dstLength,
                         getBuffer(), srcLength,
                         0xFFFD,  // Standard substitution character.
                         NULL,    // Don't care about number of substitutions.
                         &status);
    }
  }
  if (U_SUCCESS(status)) {
    sink.Append(buffer, dstLength);
    sink.Flush();
  }
  if (ownsBuffer) {
    uprv_free(buffer);
  }
}
|
Safe
|
[
"CWE-190",
"CWE-787"
] |
icu
|
b7d08bc04a4296982fcef8b6b8a354a9e4e7afca
|
3.9963365906608743e+37
| 40 |
ICU-20958 Prevent SEGV_MAPERR in append
See #971
| 0 |
/* Register a chain type in the global chain_type table, under the
 * nfnetlink nftables mutex.  Registering a (family, type) slot that is
 * already occupied triggers a WARN and leaves the table unchanged. */
void nft_register_chain_type(const struct nft_chain_type *ctype)
{
	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	if (!WARN_ON(__nft_chain_type_get(ctype->family, ctype->type)))
		chain_type[ctype->family][ctype->type] = ctype;
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
|
Safe
|
[
"CWE-665"
] |
linux
|
ad9f151e560b016b6ad3280b48e42fa11e1a5440
|
6.943529168222879e+37
| 10 |
netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: syzbot+ce96ca2b1d0b37c6422d@syzkaller.appspotmail.com
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
| 0 |
/*
  Print " <prefix><value>" for the result column named `name', but only
  when the value differs from its default (non-NULL, non-empty, not "0").
  When string_value is set the value is emitted through unescape(),
  otherwise verbatim.  Prints nothing if the column is absent or still
  at its default.
*/

static void print_value(FILE *file, MYSQL_RES  *result, MYSQL_ROW row,
			const char *prefix, const char *name,
			int string_value)
{
  MYSQL_FIELD	*field;

  /* Walk the field list; `row' is advanced in lock-step so that row[0]
     always holds the value belonging to the field under inspection. */
  mysql_field_seek(result, 0);
  while ((field= mysql_fetch_field(result)) != NULL)
  {
    if (strcmp(field->name, name) == 0 &&
        row[0] && row[0][0] && strcmp(row[0], "0") != 0) /* Skip default */
    {
      fputc(' ', file);
      fputs(prefix, file);
      if (string_value)
        unescape(file, row[0], strlen(row[0]));
      else
        fputs(row[0], file);
      check_io(file);
      return;
    }
    row++;
  }
  /* Column not found, or value left at its default: emit nothing. */
} /* print_value */
|
Safe
|
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
|
2.1064019382741327e+38
| 26 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
| 0 |
/*
 * Accessor: returns the lookup_certs callback stored in the given
 * X509_STORE_CTX.  Read-only; the context is not modified.
 * NOTE(review): presumably the callback used to collect candidate
 * certificates by subject during chain building -- confirm against the
 * X509_STORE_CTX_set_lookup_certs documentation.
 */
X509_STORE_CTX_lookup_certs_fn X509_STORE_CTX_get_lookup_certs(X509_STORE_CTX *ctx)
{
    return ctx->lookup_certs;
}
|
Safe
|
[
"CWE-295"
] |
openssl
|
2a40b7bc7b94dd7de897a74571e7024f0cf0d63b
|
2.8225050655092867e+38
| 4 |
check_chain_extensions: Do not override error return value by check_curve
The X509_V_FLAG_X509_STRICT flag enables additional security checks of the
certificates present in a certificate chain. It is not set by default.
Starting from OpenSSL version 1.1.1h a check to disallow certificates with
explicitly encoded elliptic curve parameters in the chain was added to the
strict checks.
An error in the implementation of this check meant that the result of a
previous check to confirm that certificates in the chain are valid CA
certificates was overwritten. This effectively bypasses the check
that non-CA certificates must not be able to issue other certificates.
If a "purpose" has been configured then a subsequent check that the
certificate is consistent with that purpose also checks that it is a
valid CA. Therefore where a purpose is set the certificate chain will
still be rejected even when the strict flag has been used. A purpose is
set by default in libssl client and server certificate verification
routines, but it can be overriden by an application.
Affected applications explicitly set the X509_V_FLAG_X509_STRICT
verification flag and either do not set a purpose for the certificate
verification or, in the case of TLS client or server applications,
override the default purpose to make it not set.
CVE-2021-3450
Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
| 0 |
flatview_extend_translation(struct uc_struct *uc, FlatView *fv, hwaddr addr,
                            hwaddr target_len,
                            MemoryRegion *mr, hwaddr base, hwaddr len,
                            bool is_write, MemTxAttrs attrs)
{
    /* On entry, `len' bytes at `addr' are already known to translate
     * into `mr' starting at offset `base'.  Keep translating the
     * remainder of the request and count how many bytes stay contiguous
     * in the same region; stop at the first discontinuity. */
    hwaddr covered = 0;

    while (true) {
        hwaddr xlat;
        MemoryRegion *next_mr;

        addr += len;
        covered += len;
        target_len -= len;
        if (target_len == 0) {
            /* The whole request maps contiguously into `mr'. */
            return covered;
        }

        len = target_len;
        next_mr = flatview_translate(uc, fv, addr, &xlat,
                                     &len, is_write, attrs);
        if (next_mr != mr || xlat != base + covered) {
            /* Different region, or a gap/jump in the offset: stop. */
            return covered;
        }
    }
}
|
Safe
|
[
"CWE-476"
] |
unicorn
|
3d3deac5e6d38602b689c4fef5dac004f07a2e63
|
2.2631184932480544e+38
| 25 |
Fix crash when mapping a big memory and calling uc_close
| 0 |
/* Sync callback: unload the dynamic library whose handle is passed in
 * param1, returning the result of g_free_library().  param2 is unused
 * (present only to satisfy the callback signature). */
xrdp_mm_sync_unload(long param1, long param2)
{
  return g_free_library(param1);
}
|
Safe
|
[] |
xrdp
|
d8f9e8310dac362bb9578763d1024178f94f4ecc
|
2.800259726592796e+38
| 4 |
move temp files from /tmp to /tmp/.xrdp
| 0 |
/**
 * xmlTreeErrMemory:
 * @extra:  extra information to include in the error report
 *
 * Handle an out-of-memory condition in the tree module by forwarding
 * an XML_ERR_NO_MEMORY error (domain XML_FROM_TREE) to the shared
 * simple-error reporting channel.
 */
xmlTreeErrMemory(const char *extra)
{
    __xmlSimpleError(XML_FROM_TREE, XML_ERR_NO_MEMORY, NULL, NULL, extra);
}
|
Safe
|
[
"CWE-20"
] |
libxml2
|
bdd66182ef53fe1f7209ab6535fda56366bd7ac9
|
2.049323736610707e+38
| 4 |
Avoid building recursive entities
For https://bugzilla.gnome.org/show_bug.cgi?id=762100
When we detect a recusive entity we should really not
build the associated data, moreover if someone bypass
libxml2 fatal errors and still tries to serialize a broken
entity make sure we don't risk to get ito a recursion
* parser.c: xmlParserEntityCheck() don't build if entity loop
were found and remove the associated text content
* tree.c: xmlStringGetNodeList() avoid a potential recursion
| 0 |
yang_read_type(struct ly_ctx *ctx, void *parent, char *value, enum yytokentype type)
{
    struct yang_type *typ;

    /* Allocate the transient wrapper that carries the parsed type name
     * until it is resolved into the parent's struct lys_type. */
    typ = calloc(1, sizeof *typ);
    LY_CHECK_ERR_RETURN(!typ, LOGMEM(ctx), NULL);

    typ->flags = LY_YANG_STRUCTURE_FLAG;
    switch (type) {
    case LEAF_KEYWORD: {
        struct lys_node_leaf *leaf = (struct lys_node_leaf *)parent;

        /* a leaf may carry only one type substatement */
        if (leaf->type.der) {
            LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYS, parent, "type", "leaf");
            goto error;
        }
        leaf->type.der = (struct lys_tpdf *)typ;
        leaf->type.parent = (struct lys_tpdf *)parent;
        typ->type = &leaf->type;
        break;
    }
    case LEAF_LIST_KEYWORD: {
        struct lys_node_leaflist *llist = (struct lys_node_leaflist *)parent;

        /* a leaf-list may carry only one type substatement */
        if (llist->type.der) {
            LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYS, parent, "type", "leaf-list");
            goto error;
        }
        llist->type.der = (struct lys_tpdf *)typ;
        llist->type.parent = (struct lys_tpdf *)parent;
        typ->type = &llist->type;
        break;
    }
    case UNION_KEYWORD:
        /* member type of a union: parent is the union's lys_type */
        ((struct lys_type *)parent)->der = (struct lys_tpdf *)typ;
        typ->type = (struct lys_type *)parent;
        break;
    case TYPEDEF_KEYWORD: {
        struct lys_tpdf *tpdf = (struct lys_tpdf *)parent;

        if (tpdf->type.der) {
            LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "type", "typedef");
            goto error;
        }
        tpdf->type.der = (struct lys_tpdf *)typ;
        typ->type = &tpdf->type;
        break;
    }
    case REPLACE_KEYWORD: {
        /* deviation replace type */
        struct lys_deviate *dev = (struct lys_deviate *)parent;

        if (dev->type) {
            LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "type", "deviation");
            goto error;
        }
        dev->type = calloc(1, sizeof *dev->type);
        LY_CHECK_ERR_GOTO(!dev->type, LOGMEM(ctx), error);
        dev->type->der = (struct lys_tpdf *)typ;
        typ->type = dev->type;
        break;
    }
    case EXTENSION_INSTANCE:
        ((struct lys_type *)parent)->der = (struct lys_tpdf *)typ;
        typ->type = parent;
        break;
    default:
        goto error;
    }

    typ->name = lydict_insert_zc(ctx, value);
    return typ;

error:
    /* `value' is owned by the caller only on success */
    free(value);
    free(typ);
    return NULL;
}
|
Safe
|
[
"CWE-415"
] |
libyang
|
d9feacc4a590d35dbc1af21caf9080008b4450ed
|
2.2861579617654565e+38
| 68 |
yang parser BUGFIX double free
Fixes #742
| 0 |
check_ENQUEUE(const struct ofpact_enqueue *a,
              const struct ofpact_check_params *cp)
{
    /* The reserved in-port and local ports are always valid targets. */
    if (a->port == OFPP_IN_PORT || a->port == OFPP_LOCAL) {
        return 0;
    }
    /* Any other port must fall inside the switch's port range. */
    if (ofp_to_u16(a->port) >= ofp_to_u16(cp->max_ports)) {
        return OFPERR_OFPBAC_BAD_OUT_PORT;
    }
    return 0;
}
|
Safe
|
[
"CWE-416"
] |
ovs
|
77cccc74deede443e8b9102299efc869a52b65b2
|
2.684384996951204e+38
| 10 |
ofp-actions: Fix use-after-free while decoding RAW_ENCAP.
While decoding RAW_ENCAP action, decode_ed_prop() might re-allocate
ofpbuf if there is no enough space left. However, function
'decode_NXAST_RAW_ENCAP' continues to use old pointer to 'encap'
structure leading to write-after-free and incorrect decoding.
==3549105==ERROR: AddressSanitizer: heap-use-after-free on address
0x60600000011a at pc 0x0000005f6cc6 bp 0x7ffc3a2d4410 sp 0x7ffc3a2d4408
WRITE of size 2 at 0x60600000011a thread T0
#0 0x5f6cc5 in decode_NXAST_RAW_ENCAP lib/ofp-actions.c:4461:20
#1 0x5f0551 in ofpact_decode ./lib/ofp-actions.inc2:4777:16
#2 0x5ed17c in ofpacts_decode lib/ofp-actions.c:7752:21
#3 0x5eba9a in ofpacts_pull_openflow_actions__ lib/ofp-actions.c:7791:13
#4 0x5eb9fc in ofpacts_pull_openflow_actions lib/ofp-actions.c:7835:12
#5 0x64bb8b in ofputil_decode_packet_out lib/ofp-packet.c:1113:17
#6 0x65b6f4 in ofp_print_packet_out lib/ofp-print.c:148:13
#7 0x659e3f in ofp_to_string__ lib/ofp-print.c:1029:16
#8 0x659b24 in ofp_to_string lib/ofp-print.c:1244:21
#9 0x65a28c in ofp_print lib/ofp-print.c:1288:28
#10 0x540d11 in ofctl_ofp_parse utilities/ovs-ofctl.c:2814:9
#11 0x564228 in ovs_cmdl_run_command__ lib/command-line.c:247:17
#12 0x56408a in ovs_cmdl_run_command lib/command-line.c:278:5
#13 0x5391ae in main utilities/ovs-ofctl.c:179:9
#14 0x7f6911ce9081 in __libc_start_main (/lib64/libc.so.6+0x27081)
#15 0x461fed in _start (utilities/ovs-ofctl+0x461fed)
Fix that by getting a new pointer before using.
Credit to OSS-Fuzz.
Fuzzer regression test will fail only with AddressSanitizer enabled.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=27851
Fixes: f839892a206a ("OF support and translation of generic encap and decap")
Acked-by: William Tu <u9012063@gmail.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
| 0 |
/*
 * Remove the port-group entry described by @entry from the bridge's
 * multicast database.
 *
 * Returns 0 on success, -EINVAL if the bridge is down, multicast is
 * disabled, or no matching group/port is found, and -EBUSY while a
 * querier election is still in progress.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip ip;
int err = -EINVAL;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
/* don't race with an ongoing querier election */
if (timer_pending(&br->multicast_querier_timer))
return -EBUSY;
/* build the lookup key from the netlink entry */
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
else
ip.u.ip6 = entry->addr.u.ip6;
#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &ip);
if (!mp)
goto unlock;
/* walk the group's port list looking for the requested ifindex */
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (!p->port || p->port->dev->ifindex != entry->ifindex)
continue;
if (p->port->state == BR_STATE_DISABLED)
goto unlock;
/* unlink under the lock; free after an RCU grace period */
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
err = 0;
/* last member gone: expire the group entry soon, but only if its
 * timer was armed (see commit message: avoids mod_timer on an
 * uninitialized timer) */
if (!mp->ports && !mp->mglist && mp->timer_armed &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
|
Safe
|
[
"CWE-20"
] |
linux
|
c7e8e8a8f7a70b343ca1e0f90a31e35ab2d16de1
|
8.625753410035726e+37
| 55 |
bridge: fix some kernel warning in multicast timer
Several people reported the warning: "kernel BUG at kernel/timer.c:729!"
and the stack trace is:
#7 [ffff880214d25c10] mod_timer+501 at ffffffff8106d905
#8 [ffff880214d25c50] br_multicast_del_pg.isra.20+261 at ffffffffa0731d25 [bridge]
#9 [ffff880214d25c80] br_multicast_disable_port+88 at ffffffffa0732948 [bridge]
#10 [ffff880214d25cb0] br_stp_disable_port+154 at ffffffffa072bcca [bridge]
#11 [ffff880214d25ce8] br_device_event+520 at ffffffffa072a4e8 [bridge]
#12 [ffff880214d25d18] notifier_call_chain+76 at ffffffff8164aafc
#13 [ffff880214d25d50] raw_notifier_call_chain+22 at ffffffff810858f6
#14 [ffff880214d25d60] call_netdevice_notifiers+45 at ffffffff81536aad
#15 [ffff880214d25d80] dev_close_many+183 at ffffffff81536d17
#16 [ffff880214d25dc0] rollback_registered_many+168 at ffffffff81537f68
#17 [ffff880214d25de8] rollback_registered+49 at ffffffff81538101
#18 [ffff880214d25e10] unregister_netdevice_queue+72 at ffffffff815390d8
#19 [ffff880214d25e30] __tun_detach+272 at ffffffffa074c2f0 [tun]
#20 [ffff880214d25e88] tun_chr_close+45 at ffffffffa074c4bd [tun]
#21 [ffff880214d25ea8] __fput+225 at ffffffff8119b1f1
#22 [ffff880214d25ef0] ____fput+14 at ffffffff8119b3fe
#23 [ffff880214d25f00] task_work_run+159 at ffffffff8107cf7f
#24 [ffff880214d25f30] do_notify_resume+97 at ffffffff810139e1
#25 [ffff880214d25f50] int_signal+18 at ffffffff8164f292
this is due to I forgot to check if mp->timer is armed in
br_multicast_del_pg(). This bug is introduced by
commit 9f00b2e7cf241fa389733d41b6 (bridge: only expire the mdb entry
when query is received).
Same for __br_mdb_del().
Tested-by: poma <pomidorabelisima@gmail.com>
Reported-by: LiYonghua <809674045@qq.com>
Reported-by: Robert Hancock <hancockrwd@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Stephen Hemminger <stephen@networkplumber.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
//! In-place bitwise right shift of all pixel values.
/**
   \param value Shift amount, converted to \c int.
   Each pixel is promoted to \c longT before shifting; an empty image is
   returned unchanged.  The loop is OpenMP-parallelized for images with
   at least 65536 values (via \c cimg_openmp_for).
**/
CImg<T>& operator>>=(const t value) {
if (is_empty()) return *this;
cimg_openmp_for(*this,((longT)*ptr) >> (int)value,65536);
return *this;
}
|
Safe
|
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
|
3.1123443167150326e+38
| 5 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
| 0 |
xmlSchemaPErrExt(xmlSchemaParserCtxtPtr ctxt, xmlNodePtr node, int error,
                 const xmlChar * strData1, const xmlChar * strData2,
                 const xmlChar * strData3, const char *msg, const xmlChar * str1,
                 const xmlChar * str2, const xmlChar * str3, const xmlChar * str4,
                 const xmlChar * str5)
{
    /* Report a schema-parser error carrying up to three extra data
     * strings and five message arguments.  When a parser context is
     * supplied, its error counters are bumped and its registered
     * handlers are used; otherwise __xmlRaiseError() falls back to the
     * global handlers. */
    xmlStructuredErrorFunc schannel = NULL;
    xmlGenericErrorFunc channel = NULL;
    void *errorCtxt = NULL;

    if (ctxt != NULL) {
        ctxt->nberrors++;
        ctxt->err = error;
        schannel = ctxt->serror;
        channel = ctxt->error;
        errorCtxt = ctxt->errCtxt;
    }
    __xmlRaiseError(schannel, channel, errorCtxt, ctxt, node, XML_FROM_SCHEMASP,
                    error, XML_ERR_ERROR, NULL, 0,
                    (const char *) strData1, (const char *) strData2,
                    (const char *) strData3, 0, 0, msg, str1, str2,
                    str3, str4, str5);
}
|
Safe
|
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
|
3.309045286380082e+38
| 24 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
| 0 |
/*
 * Ghostscript display-device "preclose" callback: tear down the GTK
 * window showing the page image associated with (handle, device).
 * Returns 0 on success, -1 if no image is registered for the handle.
 */
static int display_preclose(void *handle, void *device)
{
IMAGE *img = image_find(handle, device);
if (img == NULL)
return -1;
/* let GTK process pending events before the widgets go away */
gtk_main_iteration_do(FALSE);
/* the pixel buffer is owned by Ghostscript, so just drop the pointer */
img->buf = NULL;
img->width = 0;
img->height = 0;
img->rowstride = 0;
img->format = 0;
/* destroying the toplevel also destroys its children (scroll, darea) */
gtk_widget_destroy(img->window);
img->window = NULL;
img->scroll = NULL;
img->darea = NULL;
if (img->cmap)
gdk_rgb_cmap_free(img->cmap);
img->cmap = NULL;
if (img->rgbbuf)
free(img->rgbbuf);
img->rgbbuf = NULL;
/* drain the events generated by widget destruction */
gtk_main_iteration_do(FALSE);
return 0;
}
|
Vulnerable
|
[] |
ghostpdl
|
514595fc2cc84f51efdef563cf7a35a0050902e5
|
3.6486368059506555e+37
| 29 |
Bug 693038 - allow gsx to build against GTK+ 3.x
Patch from galtgendo@gmail.com applied with changes to maintain compatibility
with GTK+ 2.x, and replace a function deprecated in GTK+ 3.x.
This patch drops GTK+ 1.x support.
No cluster differences.
| 1 |
/*
 * Read the standard RealMedia metadata strings (in ff_rm_metadata order)
 * from the stream into s->metadata.  Each entry is a length-prefixed
 * string: a 16-bit length when 'wide' is nonzero, an 8-bit length
 * otherwise.  Strings longer than the local buffer are truncated by
 * get_strl().
 */
static void rm_read_metadata(AVFormatContext *s, AVIOContext *pb, int wide)
{
char buf[1024];
int i;
for (i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
int len = wide ? avio_rb16(pb) : avio_r8(pb);
get_strl(pb, buf, sizeof(buf), len);
av_dict_set(&s->metadata, ff_rm_metadata[i], buf, 0);
}
}
|
Safe
|
[
"CWE-399",
"CWE-834"
] |
FFmpeg
|
124eb202e70678539544f6268efc98131f19fa49
|
3.235332776110256e+36
| 11 |
avformat/rmdec: Fix DoS due to lack of eof check
Fixes: loop.ivr
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
/*
 * mboxlist iteration callback: add a PROPFIND response for one mailbox
 * (collection) and, at depth > 1, for its resources.
 *
 * Always returns 0 so that iteration over the remaining mailboxes
 * continues even when this one is skipped or fails; per-mailbox errors
 * are reported inline via xml_add_response().
 */
int propfind_by_collection(const mbentry_t *mbentry, void *rock)
{
struct propfind_ctx *fctx = (struct propfind_ctx *) rock;
const char *mboxname = mbentry->name;
struct buf writebuf = BUF_INITIALIZER;
struct mailbox *mailbox = NULL;
char *p;
size_t len;
int r = 0, rights = 0;
/* skip deleted items */
if (mboxname_isdeletedmailbox(mbentry->name, 0) ||
(mbentry->mbtype & MBTYPE_DELETED)) {
goto done;
}
/* Check ACL on mailbox for current user */
rights = httpd_myrights(httpd_authstate, mbentry);
if ((rights & fctx->reqd_privs) != fctx->reqd_privs) goto done;
/* We only match known types */
if (!(mbentry->mbtype & fctx->req_tgt->namespace->mboxtype)) goto done;
/* p points at the last hierarchy component of the internal name */
p = strrchr(mboxname, '.');
if (!p) goto done;
p++; /* skip dot */
switch (fctx->req_tgt->namespace->id) {
case URL_NS_DRIVE:
if (fctx->req_tgt->flags == TGT_DRIVE_USER) {
/* Special case of listing users with DAV #drives */
p = strchr(mboxname+5, '.') + 1; /* skip "user.XXX." */
if (strcmp(p, fctx->req_tgt->mboxprefix)) goto done;
}
else if (p - mboxname > 1 + (int) strlen(fctx->req_tgt->mbentry->name)) {
/* Reject folders that are more than one level deep */
goto done;
}
break;
default:
/* Magic folder filter */
if (httpd_extrafolder && strcasecmp(p, httpd_extrafolder)) goto done;
break;
}
/* skip toplevels */
if (config_getswitch(IMAPOPT_FASTMAILSHARING) && *p == '#')
goto done;
/* Open mailbox for reading */
if ((r = mailbox_open_irl(mboxname, &mailbox))) {
syslog(LOG_INFO, "mailbox_open_irl(%s) failed: %s",
mboxname, error_message(r));
}
/* publish the current mailbox in the shared propfind context */
fctx->mbentry = mbentry;
fctx->mailbox = mailbox;
fctx->record = NULL;
if (!fctx->req_tgt->resource) {
/* we always have zzzz if it's already in the URL */
int haszzzz = fctx->req_tgt->flags & TGT_USER_ZZZZ;
mbname_t *mbname = mbname_from_intname(mboxname);
if (!mbname_domain(mbname))
mbname_set_domain(mbname, httpd_extradomain);
/* we also need to deal with the discovery case,
* where mboxname doesn't match request path */
if (fctx->req_tgt->userid &&
strcmpsafe(mbname_userid(mbname), fctx->req_tgt->userid)) {
haszzzz = 1;
}
len = make_collection_url(&writebuf, fctx->req_tgt->namespace->prefix,
haszzzz, mbname, fctx->req_tgt->userid);
mbname_free(&mbname);
/* copy it all back into place... in theory we should check against
* 'last' and make sure it doesn't change from the original request.
* yay for micro-optimised memory usage... */
strlcpy(fctx->req_tgt->path, buf_cstring(&writebuf), MAX_MAILBOX_PATH);
p = fctx->req_tgt->path + len;
fctx->req_tgt->collection = p;
fctx->req_tgt->collen = strlen(p);
/* If not filtering by calendar resource, and not excluding root,
add response for collection */
if (!r && !fctx->filter_crit && !(fctx->prefer & PREFER_NOROOT) &&
(r = xml_add_response(fctx, 0, 0, NULL, NULL))) goto done;
}
if (r) {
/* report the mailbox-open (or response-build) failure inline */
xml_add_response(fctx, HTTP_SERVER_ERROR, 0, error_message(r), NULL);
goto done;
}
if (fctx->depth > 1 && fctx->open_db) { // can't do davdb searches if no dav db
/* Resource(s) */
r = propfind_by_resources(fctx);
}
done:
buf_free(&writebuf);
if (mailbox) mailbox_close(&mailbox);
return 0;
}
|
Safe
|
[] |
cyrus-imapd
|
6703ff881b6056e0c045a7b795ce8ba1bbb87027
|
1.0897563313565243e+38
| 111 |
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
| 0 |
void expectSessionCreate() {
// Expectations are in LIFO order.
TestSessionPtr new_test_session(new TestSession());
new_test_session->timeout_timer_ = new Event::MockTimer(&dispatcher_);
new_test_session->interval_timer_ = new Event::MockTimer(&dispatcher_);
test_sessions_.emplace_back(std::move(new_test_session));
expectClientCreate(test_sessions_.size() - 1);
}
|
Safe
|
[
"CWE-476"
] |
envoy
|
9b1c3962172a972bc0359398af6daa3790bb59db
|
2.4020966432796893e+38
| 8 |
healthcheck: fix grpc inline removal crashes (#749)
Signed-off-by: Matt Klein <mklein@lyft.com>
Signed-off-by: Pradeep Rao <pcrao@google.com>
| 0 |
print_distance_range(FILE* f, OnigDistance a, OnigDistance b)
{
  /* Print the range as "lo-hi", rendering each bound as "(n)" or as
     "inf" for the infinite-distance sentinel. */
  OnigDistance bound[2];
  int i;

  bound[0] = a;
  bound[1] = b;
  for (i = 0; i < 2; i++) {
    if (i != 0) fputs("-", f);
    if (bound[i] == ONIG_INFINITE_DISTANCE)
      fputs("inf", f);
    else
      fprintf(f, "(%u)", bound[i]);
  }
}
|
Safe
|
[
"CWE-125"
] |
php-src
|
c6e34d91b88638966662caac62c4d0e90538e317
|
2.1538297406376057e+38
| 14 |
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
| 0 |
/*
 * Finish (complete for error handling) every command on @error_q whose
 * domain device is attached to @port.  Safe iteration is required
 * because sas_eh_finish_cmd() removes the command from the list.
 */
static void sas_scsi_clear_queue_port(struct list_head *error_q,
struct asd_sas_port *port)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct asd_sas_port *x = dev->port;
if (x == port)
sas_eh_finish_cmd(cmd);
}
}
|
Safe
|
[] |
linux
|
318aaf34f1179b39fa9c30fa0f3288b645beee39
|
5.22329868692529e+37
| 13 |
scsi: libsas: defer ata device eh commands to libata
When ata device doing EH, some commands still attached with tasks are
not passed to libata when abort failed or recover failed, so libata did
not handle these commands. After these commands done, sas task is freed,
but ata qc is not freed. This will cause ata qc leak and trigger a
warning like below:
WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037
ata_eh_finish+0xb4/0xcc
CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1
......
Call trace:
[<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc
[<ffff0000088b8420>] ata_do_eh+0xc4/0xd8
[<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c
[<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694
[<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80
[<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170
[<ffff0000080ebd70>] process_one_work+0x144/0x390
[<ffff0000080ec100>] worker_thread+0x144/0x418
[<ffff0000080f2c98>] kthread+0x10c/0x138
[<ffff0000080855dc>] ret_from_fork+0x10/0x18
If ata qc leaked too many, ata tag allocation will fail and io blocked
for ever.
As suggested by Dan Williams, defer ata device commands to libata and
merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle
ata qcs correctly after this.
Signed-off-by: Jason Yan <yanaijie@huawei.com>
CC: Xiaofei Tan <tanxiaofei@huawei.com>
CC: John Garry <john.garry@huawei.com>
CC: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
| 0 |
// Accumulate a 2D histogram of brotli backward references read from 'fin'.
// Rows (x) bucket the distance after DistanceTransform(); columns (y)
// bucket the stream position scaled into [0, width).
// assumes 'histo' points at height rows of width ints — TODO confirm
// the caller allocates it with FLAGS_height/FLAGS_width.
void BuildHistogram(FILE* fin, int** histo) {
int height = FLAGS_height;
int width = FLAGS_width;
int skip = FLAGS_skip;
size_t min_distance = FLAGS_min_distance;
printf("height = %d, width = %d\n", height, width);
// zero the histogram before accumulating
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
histo[i][j] = 0;
}
}
int max_pos = FLAGS_size - skip;
// transform distances so the row axis is (log-)scaled per DistanceTransform
double min_dist = min_distance > 0 ? DistanceTransform(min_distance) : 0;
double max_dist = DistanceTransform(GetMaxDistance()) - min_dist;
int copy, pos, distance, x, y;
double dist;
while (ReadBackwardReference(fin, &copy, &pos, &distance)) {
if (pos == -1) continue; // In case when only insert is present.
if (distance < min_distance || distance >= GetMaxDistance()) continue;
if (FLAGS_brotli_window != -1) {
AdjustPosition(&pos);
}
if (pos >= skip && distance <= pos) {
pos -= skip;
if (pos >= max_pos) break;
// row index: normalized transformed distance, clamped to the top row
dist = DistanceTransform(static_cast<double>(distance)) - min_dist;
x = std::min(static_cast<int>(round(dist / max_dist * height)),
height - 1);
// column index: position scaled into [0, width)
y = 1ul * pos * width / max_pos;
if (!(y >= 0 && y < width)) {
printf("pos = %d, max_pos = %d, y = %d\n", pos, max_pos, y);
assert(y >= 0 && y < width);
}
if (FLAGS_with_copies) {
// spread the copy length across every column the match covers
int right = 1ul * (pos + copy - 1) * width / max_pos;
if (right < 0) {
printf("pos = %d, distance = %d, copy = %d, y = %d, right = %d\n",
pos, distance, copy, y, right);
assert(right >= 0);
}
if (y == right) {
histo[x][y] += copy;
} else {
// first (partial) column
int pos2 = static_cast<int>(ceil(1.0 * (y + 1) * max_pos / width));
histo[x][y] += pos2 - pos;
for (int i = y + 1; i < right && i < width; ++i) {
histo[x][i] += max_pos / width; // Sometimes 1 more, but who cares.
}
// Make sure the match doesn't go beyond the image.
if (right < width) {
pos2 = static_cast<int>(ceil(1.0 * right * max_pos / width));
histo[x][right] += pos + copy - 1 - pos2 + 1;
}
}
} else {
histo[x][y]++;
}
}
}
}
|
Safe
|
[
"CWE-120"
] |
brotli
|
223d80cfbec8fd346e32906c732c8ede21f0cea6
|
1.5895898125184367e+37
| 65 |
Update (#826)
* IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB
* simplify max Huffman table size calculation
* eliminate symbol duplicates (static arrays in .h files)
* minor combing in research/ code
| 0 |
/*
 * Guest write to the ICR (Interrupt Cause Read) register: a 1 bit in
 * 'val' clears the corresponding pending cause, then the interrupt line
 * is re-evaluated with the remaining causes.  'index' is unused (the
 * register-write dispatch table passes it to every handler).
 */
set_icr(E1000State *s, int index, uint32_t val)
{
DBGOUT(INTERRUPT, "set_icr %x\n", val);
set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
|
Safe
|
[
"CWE-120"
] |
qemu
|
b0d9ffcd0251161c7c92f94804dcf599dfa3edeb
|
1.1421813201841897e+38
| 5 |
e1000: Discard packets that are too long if !SBP and !LPE
The e1000_receive function for the e1000 needs to discard packets longer than
1522 bytes if the SBP and LPE flags are disabled. The linux driver assumes
this behavior and allocates memory based on this assumption.
Signed-off-by: Michael Contreras <michael@inetric.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
| 0 |
// Test helper: verify that the Permissions plugin's
// check_create_datawriter() for the fixture's topic_name/partitions and
// domain_id yields the expected outcome ('success') for a participant
// configured with 'participant_attr'.  The permissions handle is always
// returned to the plugin, even on failure.
void AccessControlTest::check_local_datawriter(
const RTPSParticipantAttributes& participant_attr,
bool success)
{
PermissionsHandle* access_handle;
get_access_handle(participant_attr, &access_handle);
SecurityException exception;
bool result = access_plugin.check_create_datawriter(
*access_handle,
domain_id,
topic_name,
partitions,
exception);
if (success)
{
ASSERT_TRUE(result) << exception.what();
}
else
{
ASSERT_FALSE(result);
}
ASSERT_TRUE(access_plugin.return_permissions_handle(access_handle, exception)) << exception.what();
}
|
Safe
|
[
"CWE-284"
] |
Fast-DDS
|
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
|
3.1456499775912955e+38
| 26 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <ikerluengo@eprosima.com>
Co-authored-by: Miguel Company <MiguelCompany@eprosima.com>
| 0 |
ixfr_rrstream_first(rrstream_t *rs) {
	/* Position the IXFR stream at the first RR recorded in the journal. */
	return (dns_journal_first_rr(((ixfr_rrstream_t *) rs)->journal));
}
|
Safe
|
[
"CWE-732"
] |
bind9
|
34348d9ee4db15307c6c42db294419b4df569f76
|
2.29715553347463e+38
| 4 |
denied axfr requests were not effective for writable DLZ zones
(cherry picked from commit d9077cd0038e59726e1956de18b4b7872038a283)
| 0 |
/* Render a lexer token id as a human-readable string into str (size len).
 * Printable single-character tokens become 'c'; known token ids are given
 * their symbolic name; anything else is formatted as ?[nnn].
 *
 * All name copies go through espruino_snprintf so the result is always
 * NUL-terminated even if it must be truncated: plain strncpy() leaves the
 * buffer unterminated when the source is >= len bytes, which caused an
 * out-of-bounds read/write (CWE-787) in callers of this function. */
void jslTokenAsString(int token, char *str, size_t len) {
  // see JS_ERROR_TOKEN_BUF_SIZE
  if (token>32 && token<128) {
    assert(len>=4);
    str[0] = '\'';
    str[1] = (char)token;
    str[2] = '\'';
    str[3] = 0;
    return;
  }
  switch (token) {
  case LEX_EOF : espruino_snprintf(str, len, "%s", "EOF"); return;
  case LEX_ID : espruino_snprintf(str, len, "%s", "ID"); return;
  case LEX_INT : espruino_snprintf(str, len, "%s", "INT"); return;
  case LEX_FLOAT : espruino_snprintf(str, len, "%s", "FLOAT"); return;
  case LEX_STR : espruino_snprintf(str, len, "%s", "STRING"); return;
  case LEX_UNFINISHED_STR : espruino_snprintf(str, len, "%s", "UNFINISHED STRING"); return;
  case LEX_TEMPLATE_LITERAL : espruino_snprintf(str, len, "%s", "TEMPLATE LITERAL"); return;
  case LEX_UNFINISHED_TEMPLATE_LITERAL : espruino_snprintf(str, len, "%s", "UNFINISHED TEMPLATE LITERAL"); return;
  case LEX_REGEX : espruino_snprintf(str, len, "%s", "REGEX"); return;
  case LEX_UNFINISHED_REGEX : espruino_snprintf(str, len, "%s", "UNFINISHED REGEX"); return;
  case LEX_UNFINISHED_COMMENT : espruino_snprintf(str, len, "%s", "UNFINISHED COMMENT"); return;
  }
  if (token>=_LEX_OPERATOR_START && token<_LEX_R_LIST_END) {
    /* Concatenated table of NUL-terminated names, one per token id,
     * in the order of the _LEX_OPERATOR_START.._LEX_R_LIST_END range. */
    const char tokenNames[] =
      /* LEX_EQUAL : */ "==\0"
      /* LEX_TYPEEQUAL : */ "===\0"
      /* LEX_NEQUAL : */ "!=\0"
      /* LEX_NTYPEEQUAL : */ "!==\0"
      /* LEX_LEQUAL : */ "<=\0"
      /* LEX_LSHIFT : */ "<<\0"
      /* LEX_LSHIFTEQUAL : */ "<<=\0"
      /* LEX_GEQUAL : */ ">=\0"
      /* LEX_RSHIFT : */ ">>\0"
      /* LEX_RSHIFTUNSIGNED */ ">>>\0"
      /* LEX_RSHIFTEQUAL : */ ">>=\0"
      /* LEX_RSHIFTUNSIGNEDEQUAL */ ">>>=\0"
      /* LEX_PLUSEQUAL : */ "+=\0"
      /* LEX_MINUSEQUAL : */ "-=\0"
      /* LEX_PLUSPLUS : */ "++\0"
      /* LEX_MINUSMINUS */ "--\0"
      /* LEX_MULEQUAL : */ "*=\0"
      /* LEX_DIVEQUAL : */ "/=\0"
      /* LEX_MODEQUAL : */ "%=\0"
      /* LEX_ANDEQUAL : */ "&=\0"
      /* LEX_ANDAND : */ "&&\0"
      /* LEX_OREQUAL : */ "|=\0"
      /* LEX_OROR : */ "||\0"
      /* LEX_XOREQUAL : */ "^=\0"
      /* LEX_ARROW_FUNCTION */ "=>\0"
      // reserved words
      /*LEX_R_IF : */ "if\0"
      /*LEX_R_ELSE : */ "else\0"
      /*LEX_R_DO : */ "do\0"
      /*LEX_R_WHILE : */ "while\0"
      /*LEX_R_FOR : */ "for\0"
      /*LEX_R_BREAK : */ "break\0" // was wrongly "return" - copy/paste bug
      /*LEX_R_CONTINUE */ "continue\0"
      /*LEX_R_FUNCTION */ "function\0"
      /*LEX_R_RETURN */ "return\0"
      /*LEX_R_VAR : */ "var\0"
      /*LEX_R_LET : */ "let\0"
      /*LEX_R_CONST : */ "const\0"
      /*LEX_R_THIS : */ "this\0"
      /*LEX_R_THROW : */ "throw\0"
      /*LEX_R_TRY : */ "try\0"
      /*LEX_R_CATCH : */ "catch\0"
      /*LEX_R_FINALLY : */ "finally\0"
      /*LEX_R_TRUE : */ "true\0"
      /*LEX_R_FALSE : */ "false\0"
      /*LEX_R_NULL : */ "null\0"
      /*LEX_R_UNDEFINED */ "undefined\0"
      /*LEX_R_NEW : */ "new\0"
      /*LEX_R_IN : */ "in\0"
      /*LEX_R_INSTANCEOF */ "instanceof\0"
      /*LEX_R_SWITCH */ "switch\0"
      /*LEX_R_CASE */ "case\0"
      /*LEX_R_DEFAULT */ "default\0"
      /*LEX_R_DELETE */ "delete\0"
      /*LEX_R_TYPEOF : */ "typeof\0"
      /*LEX_R_VOID : */ "void\0"
      /*LEX_R_DEBUGGER : */ "debugger\0"
      /*LEX_R_CLASS : */ "class\0"
      /*LEX_R_EXTENDS : */ "extends\0"
      /*LEX_R_SUPER : */ "super\0"
      /*LEX_R_STATIC : */ "static\0"
      ;
    unsigned int p = 0;
    int n = token-_LEX_OPERATOR_START;
    /* skip n names; check the bound BEFORE dereferencing tokenNames[p]
     * (the old order read one byte past the array on the last name) */
    while (n>0 && p<sizeof(tokenNames)) {
      while (p<sizeof(tokenNames) && tokenNames[p]) p++;
      p++; // skip the zero
      n--; // next token
    }
    assert(n==0);
    espruino_snprintf(str, len, "%s", &tokenNames[p]);
    return;
  }
  assert(len>=10);
  espruino_snprintf(str, len, "?[%d]", token);
}
|
Vulnerable
|
[
"CWE-787"
] |
Espruino
|
bed844f109b6c222816740555068de2e101e8018
|
2.8299072623827016e+38
| 104 |
remove strncpy usage as it's effectively useless, replace with an assertion since fn is only used internally (fix #1426)
| 1 |
/*
 * Consume the CPU-to-video blit data accumulated in s->cirrus_bltbuf.
 * Pattern-copy mode transfers everything in one call; otherwise complete
 * scanlines are blitted one at a time, carrying any alignment-padding
 * leftover bytes to the front of the buffer for the next line.
 */
static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s)
{
int copy_count;
uint8_t *end_ptr;
if (s->cirrus_srccounter > 0) {
if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) {
cirrus_bitblt_common_patterncopy(s, s->cirrus_bltbuf);
the_end:
/* transfer finished: reset all blitter state */
s->cirrus_srccounter = 0;
cirrus_bitblt_reset(s);
} else {
/* at least one scan line */
do {
(*s->cirrus_rop)(s, s->vga.vram_ptr +
(s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
s->cirrus_bltbuf, 0, 0, s->cirrus_blt_width, 1);
cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, 0,
s->cirrus_blt_width, 1);
s->cirrus_blt_dstaddr += s->cirrus_blt_dstpitch;
s->cirrus_srccounter -= s->cirrus_blt_srcpitch;
if (s->cirrus_srccounter <= 0)
goto the_end;
/* more bytes than needed can be transferred because of
word alignment, so we keep them for the next line */
/* XXX: keep alignment to speed up transfer */
end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
copy_count = s->cirrus_srcptr_end - end_ptr;
memmove(s->cirrus_bltbuf, end_ptr, copy_count);
s->cirrus_srcptr = s->cirrus_bltbuf + copy_count;
s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
} while (s->cirrus_srcptr >= s->cirrus_srcptr_end);
}
}
}
|
Safe
|
[
"CWE-125"
] |
qemu
|
f153b563f8cf121aebf5a2fff5f0110faf58ccb3
|
2.6153576513371882e+38
| 35 |
cirrus: handle negative pitch in cirrus_invalidate_region()
cirrus_invalidate_region() calls memory_region_set_dirty()
on a per-line basis, always ranging from off_begin to
off_begin+bytesperline. With a negative pitch off_begin
marks the top most used address and thus we need to do an
initial shift backwards by a line for negative pitches of
backward blits, otherwise the first iteration covers the
line going from the start offset forwards instead of
backwards.
Additionally since the start address is inclusive, if we
shift by a full `bytesperline` we move to the first address
*not* included in the blit, so we only shift by one less
than bytesperline.
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Message-id: 1485352137-29367-1-git-send-email-w.bumiller@proxmox.com
[ kraxel: codestyle fixes ]
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
| 0 |
// Configure this Scan object as a hidden refinement scan that refines the
// coefficient bit at 'bitposition' (from bit bitposition+1 down to
// bitposition) for the spectral band [start, stop].  'comp' selects the
// single component for AC/residual scans; DC scans (start == 0 in the
// non-residual case) cover all components of the frame.
// Throws on unsupported frame types, on more than four components, or
// when the arithmetic-coding option is not compiled in.
void Scan::MakeHiddenRefinementScan(UBYTE bitposition,class Component *comp,UBYTE start,UBYTE stop)
{
bool colortrafo = m_pFrame->TablesOf()->hasSeparateChroma(m_pFrame->DepthOf());
bool residual = false; // for a residual scan type.
assert(m_pParser == NULL);
if (m_pFrame->DepthOf() > 4)
JPG_THROW(INVALID_PARAMETER,"Scan::MakeHiddenRefinementScan",
"hidden refinement scans are confined to four components at most");
// Record the spectral band and the refined bit range.
m_ucScanStart = start;
m_ucScanStop = stop;
m_ucLowBit = bitposition;
m_ucHighBit = bitposition+1;
m_ucHiddenBits = 0; // not here anymore.
m_bHidden = true;
// First pass: decide which components take part in the scan.
switch(m_pFrame->ScanTypeOf()) {
case Residual:
case ACResidual:
case ResidualProgressive:
case ACResidualProgressive:
// Only one component in the scan.
assert(stop >= start);
m_ucCount = 1;
m_ucComponent[0] = comp->IDOf();
break;
default:
if (start == 0) {
UBYTE i;
assert(stop == 0); // This is a DC scan, hopefully.
m_ucCount = m_pFrame->DepthOf();
for(i = 0;i < m_ucCount;i++) {
m_ucComponent[i] = m_pFrame->ComponentOf(i)->IDOf();
m_ucDCTable[i] = 0;
m_ucACTable[i] = 0; // Fixed later.
}
} else {
// Only one component in the scan.
assert(stop >= start);
m_ucCount = 1;
m_ucComponent[0] = comp->IDOf();
}
break;
}
// Second pass: assign entropy-coding tables and create the parser.
switch(m_pFrame->ScanTypeOf()) {
case Baseline:
case Sequential:
case Progressive:
if (colortrafo) {
m_ucACTable[0] = (comp && comp->IndexOf() == 0)?(0):(1); // Luma uses a separate table.
m_ucDCTable[0] = 0;
m_ucDCTable[1] = m_ucDCTable[2] = m_ucDCTable[3] = 1; // Chroma uses a separate table.
} else {
m_ucACTable[0] = 0;
m_ucDCTable[0] = 0;
m_ucDCTable[1] = m_ucDCTable[2] = m_ucDCTable[3] = 0; // Chroma uses the same table.
}
m_pHuffman = new(m_pEnviron) HuffmanTable(m_pEnviron);
m_pParser = new(m_pEnviron) RefinementScan(m_pFrame,this,
start,stop,
bitposition,bitposition+1,
false,false);
break;
case ACSequential:
case ACProgressive:
#if ACCUSOFT_CODE
m_ucACTable[0] = 0;
m_ucDCTable[0] = 0;
m_pConditioner = new(m_pEnviron) ACTable(m_pEnviron);
m_pParser = new(m_pEnviron) ACRefinementScan(m_pFrame,this,
start,stop,
bitposition,bitposition+1,
false,false);
#else
JPG_THROW(NOT_IMPLEMENTED," Scan::MakeHiddenRefinementScan",
"Arithmetic coding option not available in your code release, please contact Accusoft for a full version");
#endif
break;
case Residual:
case ResidualProgressive:
residual = true;
// runs into the following.
case ResidualDCT:
if (colortrafo) {
m_ucACTable[0] = (comp && comp->IndexOf() == 0)?(0):(1); // Luma uses a separate table.
m_ucDCTable[0] = 0;
m_ucDCTable[1] = m_ucDCTable[2] = m_ucDCTable[3] = 1; // Chroma uses a separate table.
} else {
m_ucACTable[0] = 0;
m_ucDCTable[0] = 0;
m_ucDCTable[1] = m_ucDCTable[2] = m_ucDCTable[3] = 0; // Chroma uses the same table.
}
assert(residual == false || (start == 0 && stop == 63));
m_pHuffman = new(m_pEnviron) HuffmanTable(m_pEnviron);
m_pParser = new(m_pEnviron) RefinementScan(m_pFrame,this,
start,stop,
bitposition,bitposition+1,
false,residual);
break;
case ACResidual:
case ACResidualProgressive:
residual = true;
// fall through
case ACResidualDCT:
#if ACCUSOFT_CODE
m_ucACTable[0] = 0;
m_ucDCTable[0] = 0;
assert(residual == false || (start == 0 && stop == 63));
m_pConditioner = new(m_pEnviron) ACTable(m_pEnviron);
m_pParser = new(m_pEnviron) ACRefinementScan(m_pFrame,this,
start,stop,
bitposition,bitposition+1,
false,residual);
#else
JPG_THROW(NOT_IMPLEMENTED," Scan::MakeHiddenRefinementScan",
"Arithmetic coding option not available in your code release, please contact Accusoft for a full version");
#endif
break;
default:
JPG_THROW(INVALID_PARAMETER,"Scan::MakeHiddenRefinementScan",
"frame type does not support hidden refinement scans");
break;
}
}
|
Safe
|
[
"CWE-476"
] |
libjpeg
|
ea6315164b1649ff932a396b7600eac4bffcfaba
|
1.0192408214076824e+38
| 132 |
Added a check whether all components in a scan are actually present.
| 0 |
GF_Box *btrt_box_new()
{
	/* Allocate and zero-initialize a BitRate ('btrt') box. */
	GF_BitRateBox *box = (GF_BitRateBox *) gf_malloc(sizeof(GF_BitRateBox));
	if (!box) return NULL;
	memset(box, 0, sizeof(GF_BitRateBox));
	box->type = GF_ISOM_BOX_TYPE_BTRT;
	return (GF_Box *) box;
}
|
Safe
|
[
"CWE-401"
] |
gpac
|
0a85029d694f992f3631e2f249e4999daee15cbf
|
1.2941863769465386e+38
| 8 |
fixed #1785 (fuzz)
| 0 |
/*
 * Send an asynchronous SMB2 ECHO (keep-alive) request to the server.
 * On a connection still negotiating, no echo is sent; instead the
 * reconnect work is kicked immediately.  Returns 0 on success or a
 * negative errno from request allocation / submission.
 */
SMB2_echo(struct TCP_Server_Info *server)
{
struct smb2_echo_req *req;
int rc = 0;
struct kvec iov[2];
struct smb_rqst rqst = { .rq_iov = iov,
.rq_nvec = 2 };
cifs_dbg(FYI, "In echo request\n");
if (server->tcpStatus == CifsNeedNegotiate) {
/* No need to send echo on newly established connections */
queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
/* 4 for rfc1002 length field */
/* iov[0] carries the framing header, iov[1] the SMB2 body */
iov[0].iov_len = 4;
iov[0].iov_base = (char *)req;
iov[1].iov_len = get_rfc1002_length(req);
iov[1].iov_base = (char *)req + 4;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
CIFS_ECHO_OP);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
/* async machinery holds its own reference to the request buffer */
cifs_small_buf_release(req);
return rc;
}
|
Safe
|
[
"CWE-476"
] |
linux
|
cabfb3680f78981d26c078a26e5c748531257ebb
|
2.8778163148498817e+38
| 36 |
CIFS: Enable encryption during session setup phase
In order to allow encryption on SMB connection we need to exchange
a session key and generate encryption and decryption keys.
Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
| 0 |
/*
 * Return a space-separated list of the optional delegate libraries this
 * ImageMagick build was configured with.  The list is assembled at
 * compile time: each MAGICKCORE_*_DELEGATE feature macro contributes one
 * string-literal fragment, and adjacent literals concatenate into a
 * single static string (empty when no delegates are enabled).
 */
MagickExport const char *GetMagickDelegates(void)
{
return ""
#if defined(MAGICKCORE_AUTOTRACE_DELEGATE)
"autotrace "
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
"bzlib "
#endif
#if defined(MAGICKCORE_CAIRO_DELEGATE)
"cairo "
#endif
#if defined(MAGICKCORE_DJVU_DELEGATE)
"djvu "
#endif
#if defined(MAGICKCORE_DPS_DELEGATE)
"dps "
#endif
#if defined(MAGICKCORE_EMF_DELEGATE)
"emf "
#endif
#if defined(MAGICKCORE_FFTW_DELEGATE)
"fftw "
#endif
#if defined(MAGICKCORE_FONTCONFIG_DELEGATE)
"fontconfig "
#endif
#if defined(MAGICKCORE_FREETYPE_DELEGATE)
"freetype "
#endif
#if defined(MAGICKCORE_FPX_DELEGATE)
"fpx "
#endif
#if defined(MAGICKCORE_GS_DELEGATE)
"gslib "
#endif
#if defined(MAGICKCORE_GVC_DELEGATE)
"gvc "
#endif
#if defined(MAGICKCORE_JBIG_DELEGATE)
"jbig "
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE) && defined(MAGICKCORE_PNG_DELEGATE)
"jng "
#endif
#if defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
"jp2 "
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
"jpeg "
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
"lcms "
#endif
#if defined(MAGICKCORE_LQR_DELEGATE)
"lqr "
#endif
#if defined(MAGICKCORE_LTDL_DELEGATE)
"ltdl "
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
"lzma "
#endif
#if defined(MAGICKCORE_OPENEXR_DELEGATE)
"openexr "
#endif
#if defined(MAGICKCORE_PANGOCAIRO_DELEGATE)
"pangocairo "
#endif
#if defined(MAGICKCORE_PNG_DELEGATE)
"png "
#endif
#if defined(MAGICKCORE_DPS_DELEGATE) || defined(MAGICKCORE_GS_DELEGATE) || defined(WIN32)
"ps "
#endif
#if defined(MAGICKCORE_RSVG_DELEGATE)
"rsvg "
#endif
#if defined(MAGICKCORE_TIFF_DELEGATE)
"tiff "
#endif
#if defined(MAGICKCORE_WEBP_DELEGATE)
"webp "
#endif
#if defined(MAGICKCORE_WMF_DELEGATE) || defined (MAGICKCORE_WMFLITE_DELEGATE)
"wmf "
#endif
#if defined(MAGICKCORE_X11_DELEGATE)
"x "
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
"xml "
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
"zlib"
#endif
;
}
|
Safe
|
[
"CWE-190",
"CWE-189",
"CWE-703"
] |
ImageMagick
|
0f6fc2d5bf8f500820c3dbcf0d23ee14f2d9f734
|
1.128251315860175e+38
| 98 | 0 |
|
/*
 * Reserve space on the trace ring buffer for an event of @type/@len.
 *
 * Fast path: if the file is soft-disabled or filtered and a per-cpu
 * scratch event exists, hand that out instead of touching the ring
 * buffer (the per-cpu counter guards against nested use).  Otherwise
 * reserve from the real buffer; if that fails but triggers are armed,
 * fall back to the temp_buffer so trigger conditions can still inspect
 * the event data.  *current_rb is updated to whichever buffer was used.
 */
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if ((trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		/* Nested use of the scratch event: back out and reserve normally. */
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb,
					    type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb,
						    type, len, flags, pc);
	}
	return entry;
}
|
Safe
|
[
"CWE-415"
] |
linux
|
4397f04575c44e1440ec2e49b6302785c95fd2f8
|
2.3116187278858638e+38
| 38 |
tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to them are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/20171226071253.8968-1-chunyan.zhang@spreadtrum.com
Cc: stable@vger.kernel.org
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <jing.xia@spreadtrum.com>
Reported-by: Chunyan Zhang <chunyan.zhang@spreadtrum.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
| 0 |
/*
 * Resolve the database host for @server and start a connection attempt.
 *
 * Three address forms are handled inline without DNS:
 *   - NULL host or a path starting with '/'  -> unix domain socket,
 *   - host containing ':'                    -> IPv6 literal,
 *   - host starting with a digit             -> IPv4 literal.
 * Anything else is handed to the async resolver; connect_server() is
 * then invoked later from dns_callback().
 */
static void dns_connect(struct PgSocket *server)
{
	struct sockaddr_un sa_un;
	struct sockaddr_in sa_in;
	struct sockaddr_in6 sa_in6;
	struct sockaddr *sa;
	struct PgDatabase *db = server->pool->db;
	const char *host = db->host;
	const char *unix_dir;
	int sa_len;

	if (!host || host[0] == '/') {
		memset(&sa_un, 0, sizeof(sa_un));
		sa_un.sun_family = AF_UNIX;
		unix_dir = host ? host : cf_unix_socket_dir;
		if (!unix_dir || !*unix_dir) {
			log_error("Unix socket dir not configured: %s", db->name);
			disconnect_server(server, false, "cannot connect");
			return;
		}
		snprintf(sa_un.sun_path, sizeof(sa_un.sun_path),
			 "%s/.s.PGSQL.%d", unix_dir, db->port);
		/* BUGFIX: log only after sun_path has been filled in;
		 * previously this printed the uninitialized buffer. */
		slog_noise(server, "unix socket: %s", sa_un.sun_path);
		sa = (struct sockaddr *)&sa_un;
		sa_len = sizeof(sa_un);
	} else if (strchr(host, ':')) { // assume IPv6 address on any : in addr
		slog_noise(server, "inet6 socket: %s", db->host);
		memset(&sa_in6, 0, sizeof(sa_in6));
		sa_in6.sin6_family = AF_INET6;
		inet_pton(AF_INET6, db->host, (void *) sa_in6.sin6_addr.s6_addr);
		sa_in6.sin6_port = htons(db->port);
		sa = (struct sockaddr *)&sa_in6;
		sa_len = sizeof(sa_in6);
	} else if (host[0] >= '0' && host[0] <= '9') { // else try IPv4
		slog_noise(server, "inet socket: %s", db->host);
		memset(&sa_in, 0, sizeof(sa_in));
		sa_in.sin_family = AF_INET;
		sa_in.sin_addr.s_addr = inet_addr(db->host);
		sa_in.sin_port = htons(db->port);
		sa = (struct sockaddr *)&sa_in;
		sa_len = sizeof(sa_in);
	} else {
		struct DNSToken *tk;
		slog_noise(server, "dns socket: %s", db->host);
		/* launch dns lookup */
		tk = adns_resolve(adns, db->host, dns_callback, server);
		if (tk)
			server->dns_token = tk;
		return;
	}

	connect_server(server, sa, sa_len);
}
|
Safe
|
[] |
pgbouncer
|
4b92112b820830b30cd7bc91bef3dd8f35305525
|
1.8822923441531185e+38
| 53 |
add_database: fail gracefully if too long db name
Truncating & adding can lead to fatal() later.
It was not an issue before, but with autodb (* in [databases] section)
the database name can come from the network, thus allowing remote shutdown..
| 0 |
irc_server_get_number_buffer (struct t_irc_server *server,
                              int *server_pos, int *server_total)
{
    struct t_irc_server *srv;

    /* Count servers that currently have a buffer and report (via the
       out parameters) both that total and the 1-based position of
       "server" among them (0 when server has no buffer). */
    *server_pos = 0;
    *server_total = 0;

    for (srv = irc_servers; srv; srv = srv->next_server)
    {
        if (!srv->buffer)
            continue;
        (*server_total)++;
        if (srv == server)
            *server_pos = *server_total;
    }
}
|
Safe
|
[
"CWE-20"
] |
weechat
|
c265cad1c95b84abfd4e8d861f25926ef13b5d91
|
1.0938530700456086e+38
| 18 |
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
| 0 |
/*
 * i40e_free_macvlan_channels - tear down all macvlan offload channels
 * @vsi: the VSI whose macvlan_list is to be emptied
 *
 * For each channel: release the forwarding state (if any), unlink the
 * channel from the list, and delete its VSI element from firmware when
 * it was fully initialized.  Resets macvlan_cnt to 0 when done.
 */
static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int ret;

	if (list_empty(&vsi->macvlan_list))
		return;

	/* _safe iteration: entries are deleted while walking the list */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
		struct i40e_vsi *parent_vsi;

		if (i40e_is_channel_macvlan(ch)) {
			i40e_reset_ch_rings(vsi, ch);
			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
			netdev_set_sb_channel(ch->fwd->netdev, 0);
			kfree(ch->fwd);
			ch->fwd = NULL;
		}

		list_del(&ch->list);
		parent_vsi = ch->parent_vsi;
		/* channels never set up in firmware are freed directly */
		if (!parent_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}

		/* remove the VSI */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, parent_vsi->seid);
		kfree(ch);
	}
	vsi->macvlan_cnt = 0;
}
|
Safe
|
[
"CWE-400",
"CWE-401"
] |
linux
|
27d461333459d282ffa4a2bdb6b215a59d493a8f
|
9.989378216416106e+37
| 38 |
i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
| 0 |
/*
 * Release every anon_vma and mapping lock taken by mm_take_all_locks().
 *
 * The caller must still hold the mmap lock for write (the first BUG_ON
 * fires if a read trylock succeeds, i.e. the write lock is not held)
 * and must hold mm_all_locks_mutex, which is dropped at the end.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(mmap_read_trylock(mm));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
|
Safe
|
[
"CWE-362"
] |
linux
|
246c320a8cfe0b11d81a4af38fa9985ef0cc9a4c
|
3.038393199633418e+38
| 18 |
mm/mmap.c: close race between munmap() and expand_upwards()/downwards()
VMA with VM_GROWSDOWN or VM_GROWSUP flag set can change their size under
mmap_read_lock(). It can lead to race with __do_munmap():
Thread A Thread B
__do_munmap()
detach_vmas_to_be_unmapped()
mmap_write_downgrade()
expand_downwards()
vma->vm_start = address;
// The VMA now overlaps with
// VMAs detached by the Thread A
// page fault populates expanded part
// of the VMA
unmap_region()
// Zaps pagetables partly
// populated by Thread B
Similar race exists for expand_upwards().
The fix is to avoid downgrading mmap_lock in __do_munmap() if detached
VMAs are next to VM_GROWSDOWN or VM_GROWSUP VMA.
[akpm@linux-foundation.org: s/mmap_sem/mmap_lock/ in comment]
Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Yang Shi <yang.shi@linux.alibaba.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org> [4.20+]
Link: http://lkml.kernel.org/r/20200709105309.42495-1-kirill.shutemov@linux.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
/*
 * Look up a loop device: a specific minor @i, or any free device when
 * @i is negative.  On success *l points at the device and its number is
 * returned; otherwise -ENODEV.
 */
static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;

	if (i < 0) {
		/* find any unused device */
		if (idr_for_each(&loop_index_idr, &find_free_cb, &lo) == 1) {
			*l = lo;
			return lo->lo_number;
		}
		return -ENODEV;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (!lo)
		return -ENODEV;

	*l = lo;
	return lo->lo_number;
}
|
Safe
|
[
"CWE-416",
"CWE-362"
] |
linux
|
ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
|
5.07088282673307e+37
| 25 |
loop: fix concurrent lo_open/lo_release
范龙飞 reports that KASAN can report a use-after-free in __lock_acquire.
The reason is due to insufficient serialization in lo_release(), which
will continue to use the loop device even after it has decremented the
lo_refcnt to zero.
In the meantime, another process can come in, open the loop device
again as it is being shut down. Confusion ensues.
Reported-by: 范龙飞 <long7573@126.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| 0 |
sub_points_montgomery (mpi_point_t result,
                       mpi_point_t p1, mpi_point_t p2,
                       mpi_ec_t ctx)
{
  /* Point subtraction on Montgomery curves is not implemented; abort
     loudly instead of silently producing a wrong result.  All
     parameters are intentionally unused.  */
  (void)ctx;
  (void)p2;
  (void)p1;
  (void)result;

  log_fatal ("%s: %s not yet supported\n",
             "_gcry_mpi_ec_sub_points", "Montgomery");
}
|
Safe
|
[
"CWE-200"
] |
libgcrypt
|
88e1358962e902ff1cbec8d53ba3eee46407851a
|
2.591982236161664e+38
| 11 |
ecc: Constant-time multiplication for Weierstrass curve.
* mpi/ec.c (_gcry_mpi_ec_mul_point): Use simple left-to-right binary
method for Weierstrass curve when SCALAR is secure.
| 0 |
static double mp_floor(_cimg_math_parser& mp) {
return std::floor(_mp_arg(2));
|
Safe
|
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
|
3.3613219877346697e+38
| 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
| 0 |
/*
 * _copyTargetEntry -- make a deep copy of a TargetEntry node.
 *
 * expr is copied recursively, resname is string-duplicated, and the
 * remaining fields are copied by value.  The COPY_* macros rely on the
 * local names "from" and "newnode", so do not rename them.
 */
_copyTargetEntry(const TargetEntry *from)
{
	TargetEntry *newnode = makeNode(TargetEntry);

	COPY_NODE_FIELD(expr);
	COPY_SCALAR_FIELD(resno);
	COPY_STRING_FIELD(resname);
	COPY_SCALAR_FIELD(ressortgroupref);
	COPY_SCALAR_FIELD(resorigtbl);
	COPY_SCALAR_FIELD(resorigcol);
	COPY_SCALAR_FIELD(resjunk);

	return newnode;
}
|
Safe
|
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
|
3.766131938500654e+37
| 14 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
| 0 |
/*
 * A socket is considered out of TCP memory when its send queue exceeds
 * the minimum send buffer AND global TCP memory use is beyond the hard
 * limit (pressure level 2).
 */
static inline bool tcp_out_of_memory(struct sock *sk)
{
	return sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	       sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2);
}
|
Safe
|
[
"CWE-416",
"CWE-269"
] |
linux
|
bb1fceca22492109be12640d49f5ea5a544c6bb4
|
2.1094119319971927e+38
| 7 |
tcp: fix use after free in tcp_xmit_retransmit_queue()
When tcp_sendmsg() allocates a fresh and empty skb, it puts it at the
tail of the write queue using tcp_add_write_queue_tail()
Then it attempts to copy user data into this fresh skb.
If the copy fails, we undo the work and remove the fresh skb.
Unfortunately, this undo lacks the change done to tp->highest_sack and
we can leave a dangling pointer (to a freed skb)
Later, tcp_xmit_retransmit_queue() can dereference this pointer and
access freed memory. For regular kernels where memory is not unmapped,
this might cause SACK bugs because tcp_highest_sack_seq() is buggy,
returning garbage instead of tp->snd_nxt, but with various debug
features like CONFIG_DEBUG_PAGEALLOC, this can crash the kernel.
This bug was found by Marco Grassi thanks to syzkaller.
Fixes: 6859d49475d4 ("[TCP]: Abstract tp->highest_sack accessing & point to next skb")
Reported-by: Marco Grassi <marco.gra@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
mac_deinit (digest_hd_st * td, opaque * res, int ver)
{
  /* Finalize the record MAC into *res and release the handle.  SSL 3.0
     has its own MAC construction; every later version uses HMAC.  */
  if (ver != GNUTLS_SSL3)
    {
      _gnutls_hmac_deinit (td, res);
    }
  else
    {				/* SSL 3.0 */
      _gnutls_mac_deinit_ssl3 (td, res);
    }
}
|
Safe
|
[
"CWE-189"
] |
gnutls
|
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
|
2.3717312029069235e+37
| 11 |
Fix GNUTLS-SA-2008-1 security vulnerabilities.
See http://www.gnu.org/software/gnutls/security.html for updates.
| 0 |
/*
 * USB core disconnect callback: detach the private data from the
 * interface, then free the scratch buffer and the device state.
 * (dev and dev->buf are presumably allocated in usbtest_probe --
 * not visible here, confirm against the probe path.)
 */
static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev *dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev->buf);
	kfree(dev);
}
|
Safe
|
[
"CWE-401"
] |
linux
|
28ebeb8db77035e058a510ce9bd17c2b9a009dba
|
2.554623765865628e+38
| 9 |
usb: usbtest: fix missing kfree(dev->buf) in usbtest_disconnect
BUG: memory leak
unreferenced object 0xffff888055046e00 (size 256):
comm "kworker/2:9", pid 2570, jiffies 4294942129 (age 1095.500s)
hex dump (first 32 bytes):
00 70 04 55 80 88 ff ff 18 bb 5a 81 ff ff ff ff .p.U......Z.....
f5 96 78 81 ff ff ff ff 37 de 8e 81 ff ff ff ff ..x.....7.......
backtrace:
[<00000000d121dccf>] kmemleak_alloc_recursive
include/linux/kmemleak.h:43 [inline]
[<00000000d121dccf>] slab_post_alloc_hook mm/slab.h:586 [inline]
[<00000000d121dccf>] slab_alloc_node mm/slub.c:2786 [inline]
[<00000000d121dccf>] slab_alloc mm/slub.c:2794 [inline]
[<00000000d121dccf>] kmem_cache_alloc_trace+0x15e/0x2d0 mm/slub.c:2811
[<000000005c3c3381>] kmalloc include/linux/slab.h:555 [inline]
[<000000005c3c3381>] usbtest_probe+0x286/0x19d0
drivers/usb/misc/usbtest.c:2790
[<000000001cec6910>] usb_probe_interface+0x2bd/0x870
drivers/usb/core/driver.c:361
[<000000007806c118>] really_probe+0x48d/0x8f0 drivers/base/dd.c:551
[<00000000a3308c3e>] driver_probe_device+0xfc/0x2a0 drivers/base/dd.c:724
[<000000003ef66004>] __device_attach_driver+0x1b6/0x240
drivers/base/dd.c:831
[<00000000eee53e97>] bus_for_each_drv+0x14e/0x1e0 drivers/base/bus.c:431
[<00000000bb0648d0>] __device_attach+0x1f9/0x350 drivers/base/dd.c:897
[<00000000838b324a>] device_initial_probe+0x1a/0x20 drivers/base/dd.c:944
[<0000000030d501c1>] bus_probe_device+0x1e1/0x280 drivers/base/bus.c:491
[<000000005bd7adef>] device_add+0x131d/0x1c40 drivers/base/core.c:2504
[<00000000a0937814>] usb_set_configuration+0xe84/0x1ab0
drivers/usb/core/message.c:2030
[<00000000e3934741>] generic_probe+0x6a/0xe0 drivers/usb/core/generic.c:210
[<0000000098ade0f1>] usb_probe_device+0x90/0xd0
drivers/usb/core/driver.c:266
[<000000007806c118>] really_probe+0x48d/0x8f0 drivers/base/dd.c:551
[<00000000a3308c3e>] driver_probe_device+0xfc/0x2a0 drivers/base/dd.c:724
Acked-by: Alan Stern <stern@rowland.harvard.edu>
Reported-by: Kyungtae Kim <kt0755@gmail.com>
Signed-off-by: Zqiang <qiang.zhang@windriver.com>
Link: https://lore.kernel.org/r/20200612035210.20494-1-qiang.zhang@windriver.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
/*
 * Return the offset of the security (sk) record attached to @node.
 *
 * Returns 0 on error: EINVAL when @node is not a valid 'nk' block,
 * EFAULT when the stored sk offset (rebased by the 0x1000 header size)
 * points outside the hive.
 */
hivex_node_security (hive_h *h, hive_node_h node)
{
  if (!IS_VALID_BLOCK (h, node) || !block_id_eq (h, node, "nk")) {
    SET_ERRNO (EINVAL, "invalid block or not an 'nk' block");
    return 0;
  }

  struct ntreg_nk_record *nk = (struct ntreg_nk_record *) (h->addr + node);

  hive_node_h ret = le32toh (nk->sk);
  ret += 0x1000;                /* offsets are relative to the hive header */
  if (!IS_VALID_BLOCK (h, ret)) {
    SET_ERRNO (EFAULT, "invalid block");
    return 0;
  }

  return ret;
}
|
Safe
|
[
"CWE-400"
] |
hivex
|
771728218dac2fbf6997a7e53225e75a4c6b7255
|
1.5384654232088638e+38
| 17 |
lib/node.c: Limit recursion in ri-records (CVE-2021-3622)
Windows Registry hive "ri"-records are arbitrarily nested B-tree-like
structures:
+-------------+
| ri |
|-------------|
| nr_offsets |
| offset[0] ------> points to another lf/lh/li/ri block
| offset[1] ------>
| offset[2] ------>
+-------------+
It is possible to construct a hive with a very deeply nested tree of
ri-records, causing the internal _get_children function to recurse to
any depth which can cause programs linked to hivex to crash with a
stack overflow.
Since it is not thought that deeply nested ri-records occur in real
hives, limit recursion depth. If you hit this limit you will see the
following error and the operation will return an error instead of
crashing:
\> ls
hivex: _get_children: returning EINVAL because: ri-record nested to depth >= 32
ls: Invalid argument
Thanks to Jeremy Galindo for finding and reporting this bug.
Reported-by: Jeremy Galindo, Sr Security Engineer, Datto.com
Signed-off-by: Richard W.M. Jones <rjones@redhat.com>
Fixes: CVE-2021-3622
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1975489
(cherry picked from commit 781a12c4a49dd81365c9c567c5aa5e19e894ba0e)
| 0 |
/*
 * Search each entry of lib_path for fname, trying to open it for read.
 *
 * Returns 0 on success, with *pfile wrapping the open stream and
 * buffer/*pclen holding the combined path; 1 when no path entry
 * yielded the file; gs_error_limitcheck when a combined name would
 * overflow buffer; gs_error_invalidfileaccess when a file exists but
 * SAFER permission checks forbid reading it.
 */
lib_file_open_search_with_combine(gs_file_path_ptr lib_path, const gs_memory_t *mem, i_ctx_t *i_ctx_p,
                                  const char *fname, uint flen, char *buffer, int blen, uint *pclen, ref *pfile,
                                  gx_io_device *iodev, bool starting_arg_file, char *fmode)
{
    stream *s;
    const gs_file_path *pfpath = lib_path;
    uint pi;
    int code = 1;

    /* code == 1 means "keep searching the remaining path entries" */
    for (pi = 0; pi < r_size(&pfpath->list) && code == 1; ++pi) {
        const ref *prdir = pfpath->list.value.refs + pi;
        const char *pstr = (const char *)prdir->value.const_bytes;
        uint plen = r_size(prdir), blen1 = blen;
        gs_parsed_file_name_t pname;
        gp_file_name_combine_result r;

        /* We need to concatenate and parse the file name here
         * if this path has a %device% prefix. */
        if (pstr[0] == '%') {
            /* We concatenate directly since gp_file_name_combine_*
             * rules are not correct for other devices such as %rom% */
            code = gs_parse_file_name(&pname, pstr, plen, mem);
            if (code < 0) {
                code = 1;       /* parse failure: try the next entry */
                continue;
            }
            if (blen < max(pname.len, plen) + flen)
                return_error(gs_error_limitcheck);
            memcpy(buffer, pname.fname, pname.len);
            memcpy(buffer+pname.len, fname, flen);
            code = pname.iodev->procs.open_file(pname.iodev, buffer, pname.len + flen, fmode,
                                                &s, (gs_memory_t *)mem);
            if (code < 0) {
                code = 1;       /* open failure: try the next entry */
                continue;
            }
            make_stream_file(pfile, s, "r");
            /* fill in the buffer with the device concatenated */
            memcpy(buffer, pstr, plen);
            memcpy(buffer+plen, fname, flen);
            *pclen = plen + flen;
            code = 0;
        } else {
            r = gp_file_name_combine(pstr, plen,
                                     fname, flen, false, buffer, &blen1);
            if (r != gp_combine_success)
                continue;
            if (starting_arg_file || check_file_permissions(i_ctx_p, buffer,
                                                            blen1, iodev, "PermitFileReading") >= 0) {
                if (iodev_os_open_file(iodev, (const char *)buffer, blen1,
                                       (const char *)fmode, &s, (gs_memory_t *)mem) == 0) {
                    *pclen = blen1;
                    make_stream_file(pfile, s, "r");
                    code = 0;
                }
            }
            else {
                struct stat fstat;
                /* If we are not allowed to open the file by check_file_permissions_aux()
                 * and if the file exists, throw an error.......
                 * Otherwise, keep searching.
                 */
                if ((*iodev->procs.file_status)(iodev, (const char *)buffer, &fstat) >= 0) {
                    code = gs_note_error(gs_error_invalidfileaccess);
                }
            }
        }
    }
    return code;
}
|
Safe
|
[] |
ghostpdl
|
0d3901189f245232f0161addf215d7268c4d05a3
|
1.9477995818743163e+38
| 71 |
Bug 699657: properly apply file permissions to .tempfile
| 0 |
/*
 * Encode @bitmap as an NFSv4 bitmap4, dropping trailing all-zero words
 * so the minimal representation goes on the wire.  Returns the number
 * of words actually encoded, or a negative XDR error.
 */
static ssize_t xdr_encode_bitmap4(struct xdr_stream *xdr,
				  const __u32 *bitmap, size_t len)
{
	ssize_t status;

	/* Trim empty words */
	while (len && !bitmap[len - 1])
		--len;

	status = xdr_stream_encode_uint32_array(xdr, bitmap, len);
	if (WARN_ON_ONCE(status < 0))
		return status;

	return len;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
b4487b93545214a9db8cbf32e86411677b0cca21
|
2.0768955201410055e+38
| 13 |
nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <jeffrey.mitchell@starlab.io>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
| 0 |
/*
 * css_create - allocate, link and online a css for a subsystem
 * @cgrp: the cgroup the new css belongs to
 * @ss: the subsystem to create a css for
 *
 * Called with cgroup_mutex held.  On success the css is linked into the
 * parent's children list and onlined; on failure everything is unwound
 * and an ERR_PTR is returned (the css itself is freed via RCU work).
 */
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (!css)
		css = ERR_PTR(-ENOMEM);	/* normalize NULL to an ERR_PTR */
	if (IS_ERR(css))
		return css;

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	/* reserve the id now, make it visible only after online succeeds */
	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
		goto err_free_css;
	css->id = err;

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	return css;

err_list_del:
	list_del_rcu(&css->sibling);
err_free_css:
	list_del_rcu(&css->rstat_css_node);
	/* free after an RCU grace period so concurrent readers are safe */
	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
	return ERR_PTR(err);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
|
1.2901743880417433e+38
| 45 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: syzbot+cdb5dd11c97cc532efad@syzkaller.appspotmail.com
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Analyzed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220111232309.1786347-1-surenb@google.com
| 0 |
/*
 * Mark the in-memory inode @inum stale because its cluster buffer @bp
 * is being freed, so any pending flush is abandoned at buffer I/O or
 * journal completion rather than written back.
 *
 * The lookup is RCU-protected and revalidated under i_flags_lock;
 * inodes other than @free_ip are locked with a trylock/retry loop to
 * avoid deadlocking against concurrent users.
 */
xfs_ifree_mark_inode_stale(
	struct xfs_buf		*bp,
	struct xfs_inode	*free_ip,
	xfs_ino_t		inum)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_perag	*pag = bp->b_pag;
	struct xfs_inode_log_item *iip;
	struct xfs_inode	*ip;

retry:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));

	/* Inode not in memory, nothing to do */
	if (!ip) {
		rcu_read_unlock();
		return;
	}

	/*
	 * because this is an RCU protected lookup, we could find a recently
	 * freed or even reallocated inode during the lookup. We need to check
	 * under the i_flags_lock for a valid inode here. Skip it if it is not
	 * valid, the wrong inode or stale.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
		goto out_iflags_unlock;

	/*
	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
	 * other inodes that we did not find in the list attached to the buffer
	 * and are not already marked stale. If we can't lock it, back off and
	 * retry.
	 */
	if (ip != free_ip) {
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
			spin_unlock(&ip->i_flags_lock);
			rcu_read_unlock();
			delay(1);
			goto retry;
		}
	}
	ip->i_flags |= XFS_ISTALE;

	/*
	 * If the inode is flushing, it is already attached to the buffer.  All
	 * we needed to do here is mark the inode stale so buffer IO completion
	 * will remove it from the AIL.
	 */
	iip = ip->i_itemp;
	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
		ASSERT(iip->ili_last_fields);
		goto out_iunlock;
	}

	/*
	 * Inodes not attached to the buffer can be released immediately.
	 * Everything else has to go through xfs_iflush_abort() on journal
	 * commit as the flock synchronises removal of the inode from the
	 * cluster buffer against inode reclaim.
	 */
	if (!iip || list_empty(&iip->ili_item.li_bio_list))
		goto out_iunlock;

	__xfs_iflags_set(ip, XFS_IFLUSHING);
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	/* we have a dirty inode in memory that has not yet been flushed. */
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_fsync_fields = 0;
	spin_unlock(&iip->ili_lock);
	ASSERT(iip->ili_last_fields);

	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return;

out_iunlock:
	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_iflags_unlock:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
}
|
Safe
|
[] |
linux
|
01ea173e103edd5ec41acec65b9261b87e123fc2
|
2.288755462264568e+38
| 90 |
xfs: fix up non-directory creation in SGID directories
XFS always inherits the SGID bit if it is set on the parent inode, while
the generic inode_init_owner does not do this in a few cases where it can
create a possible security problem, see commit 0fa3ecd87848
("Fix up non-directory creation in SGID directories") for details.
Switch XFS to use the generic helper for the normal path to fix this,
just keeping the simple field inheritance open coded for the case of the
non-sgid case with the bsdgrpid mount option.
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Reported-by: Christian Brauner <christian.brauner@ubuntu.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
| 0 |
/*
 * Kill all in-flight URBs and flush completion work for every
 * initialized pipe of the device (pipes with a NULL ar_usb back
 * pointer were never set up and are skipped).
 */
static void ath10k_usb_flush_all(struct ath10k *ar)
{
	struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
	int i;

	for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
		if (!ar_usb->pipes[i].ar_usb)
			continue;
		usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
		cancel_work_sync(&ar_usb->pipes[i].io_complete_work);
	}
}
|
Safe
|
[
"CWE-476"
] |
linux
|
bfd6e6e6c5d2ee43a3d9902b36e01fc7527ebb27
|
3.239494020078753e+38
| 12 |
ath10k: Fix a NULL-ptr-deref bug in ath10k_usb_alloc_urb_from_pipe
The `ar_usb` field of `ath10k_usb_pipe_usb_pipe` objects
are initialized to point to the containing `ath10k_usb` object
according to endpoint descriptors read from the device side, as shown
below in `ath10k_usb_setup_pipe_resources`:
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
// get the address from endpoint descriptor
pipe_num = ath10k_usb_get_logical_pipe_num(ar_usb,
endpoint->bEndpointAddress,
&urbcount);
......
// select the pipe object
pipe = &ar_usb->pipes[pipe_num];
// initialize the ar_usb field
pipe->ar_usb = ar_usb;
}
The driver assumes that the addresses reported in endpoint
descriptors from device side to be complete. If a device is
malicious and does not report complete addresses, it may trigger
NULL-ptr-deref `ath10k_usb_alloc_urb_from_pipe` and
`ath10k_usb_free_urb_to_pipe`.
This patch fixes the bug by preventing potential NULL-ptr-deref.
Signed-off-by: Hui Peng <benquike@gmail.com>
Reported-by: Hui Peng <benquike@gmail.com>
Reported-by: Mathias Payer <mathias.payer@nebelwelt.net>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[groeck: Add driver tag to subject, fix build warning]
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
| 0 |
/*
 * Apply long term prediction to one channel element.
 *
 * For long windows only: build a time-domain prediction by scaling the
 * LTP history at the signalled lag, zero-pad it, run the windowed
 * LTP MDCT (and TNS when present), then add the predicted spectrum to
 * the coefficients of each scalefactor band flagged in ltp->used.
 * EIGHT_SHORT_SEQUENCE frames are left untouched.
 */
static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
{
    const LongTermPrediction *ltp = &sce->ics.ltp;
    const uint16_t *offsets = sce->ics.swb_offset;
    int i, sfb;

    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
        float *predTime = sce->ret;
        float *predFreq = ac->buf_mdct;
        int16_t num_samples = 2048;

        /* only lag+1024 history samples contribute when the lag is short */
        if (ltp->lag < 1024)
            num_samples = ltp->lag + 1024;

        for (i = 0; i < num_samples; i++)
            predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
        memset(&predTime[i], 0, (2048 - i) * sizeof(float));

        ac->windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);

        if (sce->tns.present)
            ac->apply_tns(predFreq, &sce->tns, &sce->ics, 0);

        for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
            if (ltp->used[sfb])
                for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
                    sce->coeffs[i] += predFreq[i];
    }
}
|
Safe
|
[
"CWE-703"
] |
FFmpeg
|
6e42ccb9dbc13836cd52cda594f819d17af9afa2
|
2.375180391407128e+37
| 28 |
avcodec/aacdec: Fix pulse position checks in decode_pulses()
Fixes out of array read
Fixes: asan_static-oob_1efed25_1887_cov_2013541199_HeyYa_RA10_AAC_192K_30s.rm
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
| 0 |
find_inode_file (ino_t node_num, unsigned long major_num,
		 unsigned long minor_num)
{
  struct inode_val *ival;

  /* Return the file name remembered for this inode/device pair, or
     NULL when the inode has not been seen before.  */
  ival = find_inode_val (node_num, major_num, minor_num);
  if (ival == NULL)
    return NULL;
  return ival->file_name;
}
|
Safe
|
[
"CWE-190"
] |
cpio
|
dd96882877721703e19272fe25034560b794061b
|
2.0797799974899565e+38
| 6 |
Rewrite dynamic string support.
* src/dstring.c (ds_init): Take a single argument.
(ds_free): New function.
(ds_resize): Take a single argument. Use x2nrealloc to expand
the storage.
(ds_reset,ds_append,ds_concat,ds_endswith): New function.
(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
* src/dstring.h (dynamic_string): Keep both the allocated length
(ds_size) and index of the next free byte in the string (ds_idx).
(ds_init,ds_resize): Change signature.
(ds_len): New macro.
(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
* src/copyin.c: Use new ds_ functions.
* src/copyout.c: Likewise.
* src/copypass.c: Likewise.
* src/util.c: Likewise.
| 0 |
/*
 * Stub: this password-change helper is not implemented and always
 * reports failure.  The context argument is accepted but unused.
 */
static bool _pam_winbind_change_pwd(struct pwb_context *ctx)
{
	return false;
}
|
Safe
|
[
"CWE-20"
] |
samba
|
f62683956a3b182f6a61cc7a2b4ada2e74cde243
|
3.1900267876243216e+38
| 4 |
fail authentication for single group name which cannot be converted to sid
furthermore if more than one name is supplied and no sid is converted
then also fail.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=8598
Signed-off-by: Noel Power <noel.power@suse.com>
Reviewed-by: Andreas Schneider <asn@samba.org>
Reviewed-by: David Disseldorp <ddiss@samba.org>
Autobuild-User(master): David Disseldorp <ddiss@samba.org>
Autobuild-Date(master): Fri Nov 29 15:45:11 CET 2013 on sn-devel-104
| 0 |
/* {{{ proto string ldap_t61_to_8859(string value)
   Translate T.61 characters to ISO 8859 characters.
   Thin wrapper around php_ldap_do_translate(); the final argument 0
   presumably selects the t61->8859 direction -- confirm against
   php_ldap_do_translate(). */
PHP_FUNCTION(ldap_t61_to_8859)
{
	php_ldap_do_translate(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
|
Safe
|
[
"CWE-476"
] |
php-src
|
49782c54994ecca2ef2a061063bd5a7079c43527
|
3.2171100380880263e+38
| 4 |
Fix bug #76248 - Malicious LDAP-Server Response causes Crash
| 0 |
/* Handle the RI (reverse index) control sequence by delegating to the
 * termcap 'sr' (scroll reverse) handler -- the two operations share one
 * implementation. */
vte_sequence_handler_reverse_index (VteTerminal *terminal, GValueArray *params)
{
	vte_sequence_handler_sr (terminal, params);
}
|
Safe
|
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
|
1.8001240617328674e+38
| 4 |
fix bug #548272
svn path=/trunk/; revision=2365
| 0 |
val_exp_sec_ctx_args(
    OM_uint32 *minor_status,
    gss_ctx_id_t *context_handle,
    gss_buffer_t interprocess_token)
{
    /* Initialize the outputs first so callers always observe
       deterministic values, even when validation fails below. */
    if (minor_status != NULL)
	*minor_status = 0;

    if (interprocess_token != GSS_C_NO_BUFFER) {
	interprocess_token->value = NULL;
	interprocess_token->length = 0;
    }

    /* Validate arguments. */
    if (minor_status == NULL)
	return (GSS_S_CALL_INACCESSIBLE_WRITE);

    if (context_handle == NULL || *context_handle == GSS_C_NO_CONTEXT)
	return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);

    if (interprocess_token == GSS_C_NO_BUFFER)
	return (GSS_S_CALL_INACCESSIBLE_WRITE);

    return (GSS_S_COMPLETE);
}
|
Safe
|
[
"CWE-415"
] |
krb5
|
56f7b1bc95a2a3eeb420e069e7655fb181ade5cf
|
4.454806769162271e+37
| 29 |
Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
| 0 |
/*
 * Migrate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in
 * [start_pfn, end_pfn) off the range being offlined.
 *
 * Returns 0 on success, -EBUSY when a still-referenced page cannot be
 * isolated from the LRU, or (per migrate_pages) the number of pages
 * that failed to migrate.
 */
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/* Because we don't have big zone->lock. we should
			   check this again here. */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			/* abort: put isolated pages back on their LRU lists */
			putback_lru_pages(&source);
			goto out;
		}
		/* this function returns # of failed pages */
		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
				    true, MIGRATE_SYNC);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}
|
Safe
|
[] |
linux-2.6
|
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
|
2.78608124774385e+38
| 57 |
mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone to be added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavor when system is under memory pressure. So put node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Jiang Liu <liuj97@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Keping Chen <chenkeping@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
GF_Err srpp_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_SRTPProcessBox *srpp = (GF_SRTPProcessBox *)s;

	/* four u32 algorithm identifiers; read order must match the file layout */
	ISOM_DECREASE_SIZE(s, 16)
	srpp->encryption_algorithm_rtp = gf_bs_read_u32(bs);
	srpp->encryption_algorithm_rtcp = gf_bs_read_u32(bs);
	srpp->integrity_algorithm_rtp = gf_bs_read_u32(bs);
	srpp->integrity_algorithm_rtcp = gf_bs_read_u32(bs);

	/* whatever remains of the box payload is parsed as child boxes */
	return gf_isom_box_array_read(s, bs, srpp_on_child_box);
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
|
1.8847518920511367e+38
| 11 |
fixed #1587
| 0 |
// Entry point for MCommand (tell/CLI) messages delivered to the mgr.
// Validates the session and command, enforces caps, dispatches the
// built-in (native) handlers, and finally hands anything unrecognized
// to the python module handlers on the finisher thread.  Every path
// sends exactly one reply to the client.
bool DaemonServer::handle_command(MCommand *m)
{
  Mutex::Locker l(lock);
  int r = 0;
  std::stringstream ss;
  std::string prefix;

  assert(lock.is_locked_by_me());

  /**
   * The working data for processing an MCommand. This lives in
   * a class to enable passing it into other threads for processing
   * outside of the thread/locks that called handle_command.
   */
  class CommandContext
  {
    public:
    MCommand *m;
    bufferlist odata;
    cmdmap_t cmdmap;

    CommandContext(MCommand *m_)
      : m(m_)
    {
    }

    ~CommandContext()
    {
      m->put();
    }

    void reply(int r, const std::stringstream &ss)
    {
      reply(r, ss.str());
    }

    void reply(int r, const std::string &rs)
    {
      // Let the connection drop as soon as we've sent our response
      ConnectionRef con = m->get_connection();
      if (con) {
        con->mark_disposable();
      }

      dout(1) << "handle_command " << cpp_strerror(r) << " " << rs << dendl;
      if (con) {
        MCommandReply *reply = new MCommandReply(r, rs);
        reply->set_tid(m->get_tid());
        reply->set_data(odata);
        con->send_message(reply);
      }
    }
  };

  /**
   * A context for receiving a bufferlist/error string from a background
   * function and then calling back to a CommandContext when it's done
   */
  class ReplyOnFinish : public Context {
    std::shared_ptr<CommandContext> cmdctx;

  public:
    bufferlist from_mon;
    string outs;

    ReplyOnFinish(std::shared_ptr<CommandContext> cmdctx_)
      : cmdctx(cmdctx_)
    {}
    void finish(int r) override {
      cmdctx->odata.claim_append(from_mon);
      cmdctx->reply(r, outs);
    }
  };

  std::shared_ptr<CommandContext> cmdctx = std::make_shared<CommandContext>(m);

  MgrSessionRef session(static_cast<MgrSession*>(m->get_connection()->get_priv()));
  if (!session) {
    return true;
  }
  session->put(); // SessionRef takes a ref
  if (session->inst.name == entity_name_t())
    session->inst.name = m->get_source();

  std::string format;
  boost::scoped_ptr<Formatter> f;
  map<string,string> param_str_map;

  if (!cmdmap_from_json(m->cmd, &(cmdctx->cmdmap), ss)) {
    cmdctx->reply(-EINVAL, ss);
    return true;
  }

  {
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "format", format, string("plain"));
    f.reset(Formatter::create(format));
  }

  cmd_getval(cct, cmdctx->cmdmap, "prefix", prefix);

  dout(4) << "decoded " << cmdctx->cmdmap.size() << dendl;
  dout(4) << "prefix=" << prefix << dendl;

  if (prefix == "get_command_descriptions") {
    dout(10) << "reading commands from python modules" << dendl;
    const auto py_commands = py_modules.get_commands();

    int cmdnum = 0;
    JSONFormatter f;
    f.open_object_section("command_descriptions");

    auto dump_cmd = [&cmdnum, &f](const MonCommand &mc){
      ostringstream secname;
      secname << "cmd" << setfill('0') << std::setw(3) << cmdnum;
      dump_cmddesc_to_json(&f, secname.str(), mc.cmdstring, mc.helpstring,
                           mc.module, mc.req_perms, mc.availability, 0);
      cmdnum++;
    };

    for (const auto &pyc : py_commands) {
      dump_cmd(pyc);
    }

    for (const auto &mgr_cmd : mgr_commands) {
      dump_cmd(mgr_cmd);
    }

    f.close_section();	// command_descriptions
    f.flush(cmdctx->odata);
    cmdctx->reply(0, ss);
    return true;
  }

  // lookup command
  const MonCommand *mgr_cmd = _get_mgrcommand(prefix, mgr_commands);
  _generate_command_map(cmdctx->cmdmap, param_str_map);
  if (!mgr_cmd) {
    MonCommand py_command = {"", "", "py", "rw", "cli"};
    if (!_allowed_command(session.get(), py_command.module, prefix, cmdctx->cmdmap,
                          param_str_map, &py_command)) {
      dout(1) << " access denied" << dendl;
      ss << "access denied; does your client key have mgr caps?"
            " See http://docs.ceph.com/docs/master/mgr/administrator/#client-authentication";
      cmdctx->reply(-EACCES, ss);
      return true;
    }
  } else {
    // validate user's permissions for requested command
    if (!_allowed_command(session.get(), mgr_cmd->module, prefix, cmdctx->cmdmap,
                          param_str_map, mgr_cmd)) {
      dout(1) << " access denied" << dendl;
      audit_clog->info() << "from='" << session->inst << "' "
                         << "entity='" << session->entity_name << "' "
                         << "cmd=" << m->cmd << ":  access denied";
      ss << "access denied' does your client key have mgr caps?"
            " See http://docs.ceph.com/docs/master/mgr/administrator/#client-authentication";
      cmdctx->reply(-EACCES, ss);
      return true;
    }
  }

  audit_clog->debug()
    << "from='" << session->inst << "' "
    << "entity='" << session->entity_name << "' "
    << "cmd=" << m->cmd << ": dispatch";

  // ----------------
  // service map commands
  if (prefix == "service dump") {
    if (!f)
      f.reset(Formatter::create("json-pretty"));
    cluster_state.with_servicemap([&](const ServiceMap &service_map) {
	f->dump_object("service_map", service_map);
      });
    f->flush(cmdctx->odata);
    cmdctx->reply(0, ss);
    return true;
  }
  if (prefix == "service status") {
    if (!f)
      f.reset(Formatter::create("json-pretty"));
    // only include state from services that are in the persisted service map
    f->open_object_section("service_status");
    ServiceMap s;
    cluster_state.with_servicemap([&](const ServiceMap& service_map) {
	s = service_map;
      });
    for (auto& p : s.services) {
      f->open_object_section(p.first.c_str());
      for (auto& q : p.second.daemons) {
	f->open_object_section(q.first.c_str());
	DaemonKey key(p.first, q.first);
	assert(daemon_state.exists(key));
	auto daemon = daemon_state.get(key);
	Mutex::Locker l(daemon->lock);
	f->dump_stream("status_stamp") << daemon->service_status_stamp;
	f->dump_stream("last_beacon") << daemon->last_service_beacon;
	f->open_object_section("status");
	for (auto& r : daemon->service_status) {
	  f->dump_string(r.first.c_str(), r.second);
	}
	f->close_section();
	f->close_section();
      }
      f->close_section();
    }
    f->close_section();
    f->flush(cmdctx->odata);
    cmdctx->reply(0, ss);
    return true;
  }

  if (prefix == "config set") {
    std::string key;
    std::string val;
    cmd_getval(cct, cmdctx->cmdmap, "key", key);
    cmd_getval(cct, cmdctx->cmdmap, "value", val);
    r = cct->_conf->set_val(key, val, true, &ss);
    if (r == 0) {
      cct->_conf->apply_changes(nullptr);
    }
    cmdctx->reply(0, ss);
    return true;
  }

  // -----------
  // PG commands

  if (prefix == "pg scrub" ||
      prefix == "pg repair" ||
      prefix == "pg deep-scrub") {
    string scrubop = prefix.substr(3, string::npos);
    pg_t pgid;
    string pgidstr;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "pgid", pgidstr);
    if (!pgid.parse(pgidstr.c_str())) {
      ss << "invalid pgid '" << pgidstr << "'";
      cmdctx->reply(-EINVAL, ss);
      return true;
    }
    bool pg_exists = false;
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	pg_exists = osdmap.pg_exists(pgid);
      });
    if (!pg_exists) {
      ss << "pg " << pgid << " dne";
      cmdctx->reply(-ENOENT, ss);
      return true;
    }
    int acting_primary = -1;
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	acting_primary = osdmap.get_pg_acting_primary(pgid);
      });
    if (acting_primary == -1) {
      ss << "pg " << pgid << " has no primary osd";
      cmdctx->reply(-EAGAIN, ss);
      return true;
    }
    auto p = osd_cons.find(acting_primary);
    if (p == osd_cons.end()) {
      ss << "pg " << pgid << " primary osd." << acting_primary
	 << " is not currently connected";
      cmdctx->reply(-EAGAIN, ss);
      // FIX: must bail out here; falling through would dereference the
      // end() iterator `p` below (undefined behavior) and reply twice.
      return true;
    }
    vector<pg_t> pgs = { pgid };
    for (auto& con : p->second) {
      con->send_message(new MOSDScrub(monc->get_fsid(),
				      pgs,
				      scrubop == "repair",
				      scrubop == "deep-scrub"));
    }
    ss << "instructing pg " << pgid << " on osd." << acting_primary
       << " to " << scrubop;
    cmdctx->reply(0, ss);
    return true;
  } else if (prefix == "osd scrub" ||
	      prefix == "osd deep-scrub" ||
	      prefix == "osd repair") {
    string whostr;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "who", whostr);
    vector<string> pvec;
    get_str_vec(prefix, pvec);

    set<int> osds;
    if (whostr == "*" || whostr == "all" || whostr == "any") {
      cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	  for (int i = 0; i < osdmap.get_max_osd(); i++)
	    if (osdmap.is_up(i)) {
	      osds.insert(i);
	    }
	});
    } else {
      long osd = parse_osd_id(whostr.c_str(), &ss);
      if (osd < 0) {
	ss << "invalid osd '" << whostr << "'";
	cmdctx->reply(-EINVAL, ss);
	return true;
      }
      cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	  if (osdmap.is_up(osd)) {
	    osds.insert(osd);
	  }
	});
      if (osds.empty()) {
	ss << "osd." << osd << " is not up";
	cmdctx->reply(-EAGAIN, ss);
	return true;
      }
    }
    set<int> sent_osds, failed_osds;
    for (auto osd : osds) {
      auto p = osd_cons.find(osd);
      if (p == osd_cons.end()) {
	failed_osds.insert(osd);
      } else {
	sent_osds.insert(osd);
	for (auto& con : p->second) {
	  con->send_message(new MOSDScrub(monc->get_fsid(),
					  pvec.back() == "repair",
					  pvec.back() == "deep-scrub"));
	}
      }
    }
    if (failed_osds.size() == osds.size()) {
      ss << "failed to instruct osd(s) " << osds << " to " << pvec.back()
	 << " (not connected)";
      r = -EAGAIN;
    } else {
      ss << "instructed osd(s) " << sent_osds << " to " << pvec.back();
      if (!failed_osds.empty()) {
	ss << "; osd(s) " << failed_osds << " were not connected";
      }
      r = 0;
    }
    // FIX: propagate the computed status instead of hard-coding success;
    // otherwise "all osds unreachable" was reported as r == 0.
    cmdctx->reply(r, ss);
    return true;
  } else if (prefix == "osd reweight-by-pg" ||
	     prefix == "osd reweight-by-utilization" ||
	     prefix == "osd test-reweight-by-pg" ||
	     prefix == "osd test-reweight-by-utilization") {
    bool by_pg =
      prefix == "osd reweight-by-pg" || prefix == "osd test-reweight-by-pg";
    bool dry_run =
      prefix == "osd test-reweight-by-pg" ||
      prefix == "osd test-reweight-by-utilization";
    int64_t oload;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "oload", oload, int64_t(120));
    set<int64_t> pools;
    vector<string> poolnames;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "pools", poolnames);
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	for (const auto& poolname : poolnames) {
	  int64_t pool = osdmap.lookup_pg_pool_name(poolname);
	  if (pool < 0) {
	    ss << "pool '" << poolname << "' does not exist";
	    r = -ENOENT;
	  }
	  pools.insert(pool);
	}
      });
    if (r) {
      cmdctx->reply(r, ss);
      return true;
    }
    double max_change = g_conf->mon_reweight_max_change;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_change", max_change);
    if (max_change <= 0.0) {
      ss << "max_change " << max_change << " must be positive";
      cmdctx->reply(-EINVAL, ss);
      return true;
    }
    int64_t max_osds = g_conf->mon_reweight_max_osds;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "max_osds", max_osds);
    if (max_osds <= 0) {
      ss << "max_osds " << max_osds << " must be positive";
      cmdctx->reply(-EINVAL, ss);
      return true;
    }
    string no_increasing;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "no_increasing", no_increasing);
    string out_str;
    mempool::osdmap::map<int32_t, uint32_t> new_weights;
    r = cluster_state.with_pgmap([&](const PGMap& pgmap) {
	return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	    return reweight::by_utilization(osdmap, pgmap,
					    oload,
					    max_change,
					    max_osds,
					    by_pg,
					    pools.empty() ? NULL : &pools,
					    no_increasing == "--no-increasing",
					    &new_weights,
					    &ss, &out_str, f.get());
	  });
      });
    if (r >= 0) {
      dout(10) << "reweight::by_utilization: finished with " << out_str << dendl;
    }
    if (f) {
      f->flush(cmdctx->odata);
    } else {
      cmdctx->odata.append(out_str);
    }
    if (r < 0) {
      ss << "FAILED reweight-by-pg";
      cmdctx->reply(r, ss);
      return true;
    } else if (r == 0 || dry_run) {
      ss << "no change";
      cmdctx->reply(r, ss);
      return true;
    } else {
      json_spirit::Object json_object;
      for (const auto& osd_weight : new_weights) {
	json_spirit::Config::add(json_object,
				 std::to_string(osd_weight.first),
				 std::to_string(osd_weight.second));
      }
      string s = json_spirit::write(json_object);
      std::replace(begin(s), end(s), '\"', '\'');
      const string cmd =
	"{"
	"\"prefix\": \"osd reweightn\", "
	"\"weights\": \"" + s + "\""
	"}";
      auto on_finish = new ReplyOnFinish(cmdctx);
      monc->start_mon_command({cmd}, {},
			      &on_finish->from_mon, &on_finish->outs, on_finish);
      return true;
    }
  } else if (prefix == "osd df") {
    string method;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "output_method", method);
    r = cluster_state.with_pgservice([&](const PGMapStatService& pgservice) {
	return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	    print_osd_utilization(osdmap, &pgservice, ss,
				  f.get(), method == "tree");
	    cmdctx->odata.append(ss);
	    return 0;
	  });
      });
    cmdctx->reply(r, "");
    return true;
  } else if (prefix == "osd safe-to-destroy") {
    vector<string> ids;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "ids", ids);
    set<int> osds;
    int r;
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	r = osdmap.parse_osd_id_list(ids, &osds, &ss);
      });
    if (!r && osds.empty()) {
      ss << "must specify one or more OSDs";
      r = -EINVAL;
    }
    if (r < 0) {
      cmdctx->reply(r, ss);
      return true;
    }
    set<int> active_osds, missing_stats, stored_pgs;
    int affected_pgs = 0;
    cluster_state.with_pgmap([&](const PGMap& pg_map) {
	if (pg_map.num_pg_unknown > 0) {
	  ss << pg_map.num_pg_unknown << " pgs have unknown state; cannot draw"
	     << " any conclusions";
	  r = -EAGAIN;
	  return;
	}
	int num_active_clean = 0;
	for (auto& p : pg_map.num_pg_by_state) {
	  unsigned want = PG_STATE_ACTIVE|PG_STATE_CLEAN;
	  if ((p.first & want) == want) {
	    num_active_clean += p.second;
	  }
	}
	cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	    for (auto osd : osds) {
	      if (!osdmap.exists(osd)) {
		continue;  // clearly safe to destroy
	      }
	      auto q = pg_map.num_pg_by_osd.find(osd);
	      if (q != pg_map.num_pg_by_osd.end()) {
		if (q->second.acting > 0 || q->second.up > 0) {
		  active_osds.insert(osd);
		  affected_pgs += q->second.acting + q->second.up;
		  continue;
		}
	      }
	      if (num_active_clean < pg_map.num_pg) {
		// all pgs aren't active+clean; we need to be careful.
		auto p = pg_map.osd_stat.find(osd);
		if (p == pg_map.osd_stat.end()) {
		  missing_stats.insert(osd);
		// FIX: this must be an else-branch -- dereferencing
		// p->second when p == end() is undefined behavior.
		} else if (p->second.num_pgs > 0) {
		  stored_pgs.insert(osd);
		}
	      }
	    }
	  });
      });
    if (!r && !active_osds.empty()) {
      ss << "OSD(s) " << active_osds << " have " << affected_pgs
	 << " pgs currently mapped to them";
      r = -EBUSY;
    } else if (!missing_stats.empty()) {
      ss << "OSD(s) " << missing_stats << " have no reported stats, and not all"
	 << " PGs are active+clean; we cannot draw any conclusions";
      r = -EAGAIN;
    } else if (!stored_pgs.empty()) {
      ss << "OSD(s) " << stored_pgs << " last reported they still store some PG"
	 << " data, and not all PGs are active+clean; we cannot be sure they"
	 << " aren't still needed.";
      r = -EBUSY;
    }
    if (r) {
      cmdctx->reply(r, ss);
      return true;
    }
    ss << "OSD(s) " << osds << " are safe to destroy without reducing data"
       << " durability.";
    cmdctx->reply(0, ss);
    return true;
  } else if (prefix == "osd ok-to-stop") {
    vector<string> ids;
    cmd_getval(g_ceph_context, cmdctx->cmdmap, "ids", ids);
    set<int> osds;
    int r;
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	r = osdmap.parse_osd_id_list(ids, &osds, &ss);
      });
    if (!r && osds.empty()) {
      ss << "must specify one or more OSDs";
      r = -EINVAL;
    }
    if (r < 0) {
      cmdctx->reply(r, ss);
      return true;
    }
    map<pg_t,int> pg_delta;  // pgid -> net acting set size change
    int dangerous_pgs = 0;
    cluster_state.with_pgmap([&](const PGMap& pg_map) {
	return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	    if (pg_map.num_pg_unknown > 0) {
	      ss << pg_map.num_pg_unknown << " pgs have unknown state; "
		 << "cannot draw any conclusions";
	      r = -EAGAIN;
	      return;
	    }
	    for (auto osd : osds) {
	      auto p = pg_map.pg_by_osd.find(osd);
	      if (p != pg_map.pg_by_osd.end()) {
		for (auto& pgid : p->second) {
		  --pg_delta[pgid];
		}
	      }
	    }
	    for (auto& p : pg_delta) {
	      auto q = pg_map.pg_stat.find(p.first);
	      if (q == pg_map.pg_stat.end()) {
		ss << "missing information about " << p.first << "; cannot draw"
		   << " any conclusions";
		r = -EAGAIN;
		return;
	      }
	      if (!(q->second.state & PG_STATE_ACTIVE) ||
		  (q->second.state & PG_STATE_DEGRADED)) {
		// we don't currently have a good way to tell *how* degraded
		// a degraded PG is, so we have to assume we cannot remove
		// any more replicas/shards.
		++dangerous_pgs;
		continue;
	      }
	      const pg_pool_t *pi = osdmap.get_pg_pool(p.first.pool());
	      if (!pi) {
		++dangerous_pgs; // pool is creating or deleting
	      } else {
		if (q->second.acting.size() + p.second < pi->min_size) {
		  ++dangerous_pgs;
		}
	      }
	    }
	  });
      });
    if (r) {
      cmdctx->reply(r, ss);
      return true;
    }
    if (dangerous_pgs) {
      ss << dangerous_pgs << " PGs are already degraded or might become "
	 << "unavailable";
      cmdctx->reply(-EBUSY, ss);
      return true;
    }
    ss << "OSD(s) " << osds << " are ok to stop without reducing"
       << " availability, provided there are no other concurrent failures"
       << " or interventions. " << pg_delta.size() << " PGs are likely to be"
       << " degraded (but remain available) as a result.";
    cmdctx->reply(0, ss);
    return true;
  } else if (prefix == "pg force-recovery" ||
  	     prefix == "pg force-backfill" ||
	     prefix == "pg cancel-force-recovery" ||
	     prefix == "pg cancel-force-backfill") {
    string forceop = prefix.substr(3, string::npos);
    list<pg_t> parsed_pgs;
    map<int, list<pg_t> > osdpgs;

    // figure out actual op just once
    int actual_op = 0;
    if (forceop == "force-recovery") {
      actual_op = OFR_RECOVERY;
    } else if (forceop == "force-backfill") {
      actual_op = OFR_BACKFILL;
    } else if (forceop == "cancel-force-backfill") {
      actual_op = OFR_BACKFILL | OFR_CANCEL;
    } else if (forceop == "cancel-force-recovery") {
      actual_op = OFR_RECOVERY | OFR_CANCEL;
    }

    // convert pg names to pgs, discard any invalid ones while at it
    {
      // we don't want to keep pgidstr and pgidstr_nodup forever
      vector<string> pgidstr;
      // get pgids to process and prune duplicates
      cmd_getval(g_ceph_context, cmdctx->cmdmap, "pgid", pgidstr);
      set<string> pgidstr_nodup(pgidstr.begin(), pgidstr.end());
      if (pgidstr.size() != pgidstr_nodup.size()) {
	// move elements only when there were duplicates, as this
	// reorders them
	pgidstr.resize(pgidstr_nodup.size());
	auto it = pgidstr_nodup.begin();
	for (size_t i = 0 ; i < pgidstr_nodup.size(); i++) {
	  pgidstr[i] = std::move(*it++);
	}
      }

      cluster_state.with_pgmap([&](const PGMap& pg_map) {
	for (auto& pstr : pgidstr) {
	  pg_t parsed_pg;
	  if (!parsed_pg.parse(pstr.c_str())) {
	    ss << "invalid pgid '" << pstr << "'; ";
	    r = -EINVAL;
	  } else {
	    auto workit = pg_map.pg_stat.find(parsed_pg);
	    if (workit == pg_map.pg_stat.end()) {
	      ss << "pg " << pstr << " does not exist; ";
	      r = -ENOENT;
	    } else {
	      pg_stat_t workpg = workit->second;

	      // discard pgs for which user requests are pointless
	      switch (actual_op)
	      {
		case OFR_RECOVERY:
		  if ((workpg.state & (PG_STATE_DEGRADED | PG_STATE_RECOVERY_WAIT | PG_STATE_RECOVERING)) == 0) {
		    // don't return error, user script may be racing with cluster. not fatal.
		    ss << "pg " << pstr << " doesn't require recovery; ";
		    continue;
		  } else  if (workpg.state & PG_STATE_FORCED_RECOVERY) {
		    ss << "pg " << pstr << " recovery already forced; ";
		    // return error, as it may be a bug in user script
		    r = -EINVAL;
		    continue;
		  }
		  break;
		case OFR_BACKFILL:
		  if ((workpg.state & (PG_STATE_DEGRADED | PG_STATE_BACKFILL_WAIT | PG_STATE_BACKFILLING)) == 0) {
		    ss << "pg " << pstr << " doesn't require backfilling; ";
		    continue;
		  } else if (workpg.state & PG_STATE_FORCED_BACKFILL) {
		    ss << "pg " << pstr << " backfill already forced; ";
		    r = -EINVAL;
		    continue;
		  }
		  break;
		case OFR_BACKFILL | OFR_CANCEL:
		  if ((workpg.state & PG_STATE_FORCED_BACKFILL) == 0) {
		    ss << "pg " << pstr << " backfill not forced; ";
		    continue;
		  }
		  break;
		case OFR_RECOVERY | OFR_CANCEL:
		  if ((workpg.state & PG_STATE_FORCED_RECOVERY) == 0) {
		    ss << "pg " << pstr << " recovery not forced; ";
		    continue;
		  }
		  break;
		default:
		  assert(0 == "actual_op value is not supported");
	      }

	      parsed_pgs.push_back(std::move(parsed_pg));
	    }
	  }
	}

	// group pgs to process by osd
	for (auto& pgid : parsed_pgs) {
	  auto workit = pg_map.pg_stat.find(pgid);
	  if (workit != pg_map.pg_stat.end()) {
	    pg_stat_t workpg = workit->second;
	    set<int32_t> osds(workpg.up.begin(), workpg.up.end());
	    osds.insert(workpg.acting.begin(), workpg.acting.end());
	    for (auto i : osds) {
	      osdpgs[i].push_back(pgid);
	    }
	  }
	}
      });
    }

    // respond with error only when no pgs are correct
    // yes, in case of mixed errors, only the last one will be emitted,
    // but the message presented will be fine
    if (parsed_pgs.size() != 0) {
      // clear error to not confuse users/scripts
      r = 0;
    }

    // optimize the command -> messages conversion, use only one message per distinct OSD
    cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	for (auto& i : osdpgs) {
	  if (osdmap.is_up(i.first)) {
	    vector<pg_t> pgvec(make_move_iterator(i.second.begin()), make_move_iterator(i.second.end()));
	    auto p = osd_cons.find(i.first);
	    if (p == osd_cons.end()) {
	      ss << "osd." << i.first << " is not currently connected";
	      r = -EAGAIN;
	      continue;
	    }
	    for (auto& con : p->second) {
	      con->send_message(new MOSDForceRecovery(monc->get_fsid(), pgvec, actual_op));
	    }
	    ss << "instructing pg(s) " << i.second << " on osd." << i.first << " to " << forceop << "; ";
	  }
	}
      });
    ss << std::endl;
    cmdctx->reply(r, ss);
    return true;
  } else {
    r = cluster_state.with_pgmap([&](const PGMap& pg_map) {
	return cluster_state.with_osdmap([&](const OSDMap& osdmap) {
	    return process_pg_map_command(prefix, cmdctx->cmdmap, pg_map, osdmap,
					  f.get(), &ss, &cmdctx->odata);
	  });
      });

    if (r != -EOPNOTSUPP) {
      cmdctx->reply(r, ss);
      return true;
    }
  }

  // None of the special native commands,
  ActivePyModule *handler = nullptr;
  auto py_commands = py_modules.get_py_commands();
  for (const auto &pyc : py_commands) {
    auto pyc_prefix = cmddesc_get_prefix(pyc.cmdstring);
    dout(1) << "pyc_prefix: '" << pyc_prefix << "'" << dendl;
    if (pyc_prefix == prefix) {
      handler = pyc.handler;
      break;
    }
  }

  if (handler == nullptr) {
    ss << "No handler found for '" << prefix << "'";
    dout(4) << "No handler found for '" << prefix << "'" << dendl;
    cmdctx->reply(-EINVAL, ss);
    return true;
  } else {
    // Okay, now we have a handler to call, but we must not call it
    // in this thread, because the python handlers can do anything,
    // including blocking, and including calling back into mgr.
    dout(4) << "passing through " << cmdctx->cmdmap.size() << dendl;
    finisher.queue(new FunctionContext([cmdctx, handler](int r_) {
      std::stringstream ds;
      std::stringstream ss;
      int r = handler->handle_command(cmdctx->cmdmap, &ds, &ss);
      cmdctx->odata.append(ds);
      cmdctx->reply(r, ss);
    }));
    return true;
  }
}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
2.3238767019840296e+38
| 789 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
/*
 * Global one-time initialization for the mbedTLS backend.  Delegates to
 * the (polarssl-derived) thread-lock setup helper; its return value is
 * passed through unchanged (presumably non-zero on success -- confirm
 * against Curl_polarsslthreadlock_thread_setup's contract).
 */
int Curl_mbedtls_init(void)
{
  return Curl_polarsslthreadlock_thread_setup();
}
|
Safe
|
[
"CWE-20"
] |
curl
|
6efd2fa529a189bf41736a610f6184cd8ad94b4d
|
7.5333873893233825e+37
| 4 |
mbedtls/polarssl: set "hostname" unconditionally
...as otherwise the TLS libs will skip the CN/SAN check and just allow
connection to any server. curl previously skipped this function when SNI
wasn't used or when connecting to an IP address specified host.
CVE-2016-3739
Bug: https://curl.haxx.se/docs/adv_20160518A.html
Reported-by: Moti Avrahami
| 0 |
// Intentionally empty: this implementation takes no action on a
// GoClear-OK event (the method presumably exists to satisfy the ZRTP
// callback interface -- confirm against the base class declaration).
void ZrtpQueue::goClearOk() { }
|
Safe
|
[
"CWE-119"
] |
ZRTPCPP
|
c8617100f359b217a974938c5539a1dd8a120b0e
|
2.1144865380408698e+38
| 1 |
Fix vulnerabilities found and reported by Mark Dowd
- limit length of memcpy
- limit number of offered algorithms in Hello packet
- length check in PING packet
- fix a small coding error
| 0 |
/*
 * Parse the segmentation portion of a VP8 frame header from the range
 * coder.  NOTE: every vp8_rac_* call consumes bits, so the read order
 * below must match the bitstream layout exactly.
 */
static void parse_segment_info(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        /* per-segment quantizer values/deltas, 7-bit signed */
        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i]   = vp8_rac_get_sint(c, 7);

        /* per-segment loop-filter levels, 6-bit signed */
        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
    }
    /* segment-id tree probabilities; 255 means "use the default" */
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef
|
2.8931538140613097e+37
| 20 |
avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <rsbultje@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
| 0 |
bit_write_BD (Bit_Chain *dat, double value)
{
  /*
   * Bit-double encoding: the common values 1.0 and 0.0 are stored as a
   * 2-bit code alone; anything else gets code 0 followed by the raw
   * 8-byte double.
   */
  if (value == 1.0)
    {
      bit_write_BB (dat, 1);
      return;
    }
  if (value == 0.0)
    {
      bit_write_BB (dat, 2);
      return;
    }
  bit_write_BB (dat, 0);
  bit_write_RD (dat, value);
}
|
Safe
|
[
"CWE-703",
"CWE-125"
] |
libredwg
|
95cc9300430d35feb05b06a9badf678419463dbe
|
2.397344462088461e+38
| 12 |
encode: protect from stack under-flow
From GH #178 fuzzing
| 0 |
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int attempt;

	/* Try to take the driver semaphore for up to ~10 seconds,
	 * sleeping one second between attempts.
	 */
	for (attempt = 0; attempt < 10; attempt++) {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	}

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}
|
Safe
|
[
"CWE-401"
] |
linux
|
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
|
2.779524448264242e+38
| 19 |
net: qlogic: Fix memory leak in ql_alloc_large_buffers
In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb.
This skb should be released if pci_dma_mapping_error fails.
Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()")
Signed-off-by: Navid Emamdoost <navid.emamdoost@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Decode and discard one strile's worth of scanlines so the libjpeg
 * decompressor advances to the data the caller actually wants.
 * Returns 1 on success, 0 on allocation or decode failure.
 */
OJPEGPreDecodeSkipScanlines(TIFF* tif)
{
	static const char module[]="OJPEGPreDecodeSkipScanlines";
	OJPEGState* sp=(OJPEGState*)tif->tif_data;
	uint32 m;
	/* lazily allocate a single-scanline scratch buffer, reused across calls */
	if (sp->skip_buffer==NULL)
	{
		sp->skip_buffer=_TIFFmalloc(sp->bytes_per_line);
		if (sp->skip_buffer==NULL)
		{
			TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
			return(0);
		}
	}
	/* read lines one at a time into the scratch buffer and drop them */
	for (m=0; m<sp->lines_per_strile; m++)
	{
		if (jpeg_read_scanlines_encap(sp,&(sp->libjpeg_jpeg_decompress_struct),&sp->skip_buffer,1)==0)
			return(0);
	}
	return(1);
}
|
Safe
|
[
"CWE-369"
] |
libtiff
|
43bc256d8ae44b92d2734a3c5bc73957a4d7c1ec
|
2.5718959285333062e+38
| 21 |
* libtiff/tif_ojpeg.c: make OJPEGDecode() early exit in case of failure in
OJPEGPreDecode(). This will avoid a divide by zero, and potential other issues.
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2611
| 0 |
int update_server_info(int force)
{
	/*
	 * Regenerate the support files a dumb server needs.  More such
	 * files, including an index of available pack files and their
	 * intended audiences, may be added later.
	 */
	int errs = 0;

	errs |= update_info_refs(force);
	errs |= update_info_packs(force);

	/* remove leftover rev-cache file if there is any */
	unlink_or_warn(git_path("info/rev-cache"));

	return errs;
}
|
Safe
|
[] |
git
|
c173dad58787a7f11a526dbcdaa5a2fe9ff1c87f
|
8.617947661226043e+36
| 16 |
update-server-info: Shorten read_pack_info_file()
The correct responses to a D and a T line in .git/objects/info/packs
are the same, so combine their case arms. In both cases we already
‘goto’ out of the switch so while at it, remove a redundant ‘break’
to avoid yet another line of code.
Signed-off-by: Ralf Thielow <ralf.thielow@googlemail.com>
Reviewed-by: Jonathan Nieder <jrnieder <at> gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
| 0 |
/*
 * GSource dispatch hook: process one round of pending RPC traffic for
 * this connection.  Returning FALSE (when rpc_dispatch() reports the
 * connection closed) removes the event source from the main loop.
 */
static gboolean rpc_event_dispatch(GSource *source, GSourceFunc callback, gpointer connection)
{
  return rpc_dispatch(connection) != RPC_ERROR_CONNECTION_CLOSED;
}
|
Safe
|
[
"CWE-264"
] |
nspluginwrapper
|
7e4ab8e1189846041f955e6c83f72bc1624e7a98
|
1.222875961909441e+38
| 4 |
Support all the new variables added
| 0 |
/*
 * accept() handler for AF_ALG sockets: delegate to the common
 * af_alg_accept() on the listening socket's sk.  @kern indicates a
 * kernel-internal accept; @flags is unused here.
 */
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
		      bool kern)
{
	return af_alg_accept(sock->sk, newsock, kern);
}
|
Safe
|
[
"CWE-416"
] |
linux
|
9060cb719e61b685ec0102574e10337fa5f445ea
|
1.4471033067971713e+38
| 5 |
net: crypto set sk to NULL when af_alg_release.
KASAN has found use-after-free in sockfs_setattr.
The existed commit 6d8c50dcb029 ("socket: close race condition between sock_close()
and sockfs_setattr()") is to fix this simillar issue, but it seems to ignore
that crypto module forgets to set the sk to NULL after af_alg_release.
KASAN report details as below:
BUG: KASAN: use-after-free in sockfs_setattr+0x120/0x150
Write of size 4 at addr ffff88837b956128 by task syz-executor0/4186
CPU: 2 PID: 4186 Comm: syz-executor0 Not tainted xxx + #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.10.2-1ubuntu1 04/01/2014
Call Trace:
dump_stack+0xca/0x13e
print_address_description+0x79/0x330
? vprintk_func+0x5e/0xf0
kasan_report+0x18a/0x2e0
? sockfs_setattr+0x120/0x150
sockfs_setattr+0x120/0x150
? sock_register+0x2d0/0x2d0
notify_change+0x90c/0xd40
? chown_common+0x2ef/0x510
chown_common+0x2ef/0x510
? chmod_common+0x3b0/0x3b0
? __lock_is_held+0xbc/0x160
? __sb_start_write+0x13d/0x2b0
? __mnt_want_write+0x19a/0x250
do_fchownat+0x15c/0x190
? __ia32_sys_chmod+0x80/0x80
? trace_hardirqs_on_thunk+0x1a/0x1c
__x64_sys_fchownat+0xbf/0x160
? lockdep_hardirqs_on+0x39a/0x5e0
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462589
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89
ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3
48 c7 c1 bc ff ff
ff f7 d8 64 89 01 48
RSP: 002b:00007fb4b2c83c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000104
RAX: ffffffffffffffda RBX: 000000000072bfa0 RCX: 0000000000462589
RDX: 0000000000000000 RSI: 00000000200000c0 RDI: 0000000000000007
RBP: 0000000000000005 R08: 0000000000001000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb4b2c846bc
R13: 00000000004bc733 R14: 00000000006f5138 R15: 00000000ffffffff
Allocated by task 4185:
kasan_kmalloc+0xa0/0xd0
__kmalloc+0x14a/0x350
sk_prot_alloc+0xf6/0x290
sk_alloc+0x3d/0xc00
af_alg_accept+0x9e/0x670
hash_accept+0x4a3/0x650
__sys_accept4+0x306/0x5c0
__x64_sys_accept4+0x98/0x100
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 4184:
__kasan_slab_free+0x12e/0x180
kfree+0xeb/0x2f0
__sk_destruct+0x4e6/0x6a0
sk_destruct+0x48/0x70
__sk_free+0xa9/0x270
sk_free+0x2a/0x30
af_alg_release+0x5c/0x70
__sock_release+0xd3/0x280
sock_close+0x1a/0x20
__fput+0x27f/0x7f0
task_work_run+0x136/0x1b0
exit_to_usermode_loop+0x1a7/0x1d0
do_syscall_64+0x461/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Syzkaller reproducer:
r0 = perf_event_open(&(0x7f0000000000)={0x0, 0x70, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @perf_config_ext}, 0x0, 0x0,
0xffffffffffffffff, 0x0)
r1 = socket$alg(0x26, 0x5, 0x0)
getrusage(0x0, 0x0)
bind(r1, &(0x7f00000001c0)=@alg={0x26, 'hash\x00', 0x0, 0x0,
'sha256-ssse3\x00'}, 0x80)
r2 = accept(r1, 0x0, 0x0)
r3 = accept4$unix(r2, 0x0, 0x0, 0x0)
r4 = dup3(r3, r0, 0x0)
fchownat(r4, &(0x7f00000000c0)='\x00', 0x0, 0x0, 0x1000)
Fixes: 6d8c50dcb029 ("socket: close race condition between sock_close() and sockfs_setattr()")
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| 0 |
/*
 * Append the (D)TLS 1.3 inner-plaintext trailer to 'content': one byte
 * holding the real record type, followed by 'pad' zero padding bytes.
 * *content_size is the current payload length and is updated in place;
 * 'remaining' is the free space available in the buffer past that point.
 *
 * Returns 0 on success, -1 if the trailer does not fit.
 */
static int ssl_build_inner_plaintext( unsigned char *content,
                                      size_t *content_size,
                                      size_t remaining,
                                      uint8_t rec_type,
                                      size_t pad )
{
    size_t offset = *content_size;

    /* One byte is needed for the actual record type. */
    if( remaining == 0 )
        return( -1 );
    content[ offset ] = rec_type;
    offset++;
    remaining--;

    /* Then the 'pad' zero bytes of padding must fit as well. */
    if( remaining < pad )
        return( -1 );
    memset( content + offset, 0, pad );
    offset += pad;

    *content_size = offset;
    return( 0 );
}
|
Safe
|
[
"CWE-787"
] |
mbedtls
|
f333dfab4a6c2d8a604a61558a8f783145161de4
|
2.46007521755175e+38
| 24 |
More SSL debug messages for ClientHello parsing
In particular, be verbose when checking the ClientHello cookie in a possible
DTLS reconnection.
Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com>
| 0 |
/*
 * libcroco SAC callback invoked at the start of a CSS ruleset: remember
 * the selector list in our per-parse user data so later declaration
 * callbacks can associate properties with it.
 */
ccss_start_selector (CRDocHandler * a_handler, CRSelector * a_selector_list)
{
    CSSUserData *user_data;

    g_return_if_fail (a_handler);

    /* Take a reference: the selector list must outlive this callback. */
    cr_selector_ref (a_selector_list);

    user_data = (CSSUserData *) a_handler->app_data;
    user_data->selector = a_selector_list;
}
|
Safe
|
[
"CWE-20"
] |
librsvg
|
d1c9191949747f6dcfd207831d15dd4ba00e31f2
|
1.8488970434422243e+38
| 10 |
state: Store mask as reference
Instead of immediately looking up the mask, store the reference and look
it up on use.
| 0 |
/*
 * sysfs bin-attribute write handler: user space feeds firmware bytes into
 * the pending fw_priv buffer through the "data" file.
 *
 * Returns the number of bytes accepted, or a negative errno:
 *   -EPERM  caller lacks CAP_SYS_RAWIO
 *   -ENODEV no load in progress (or it already completed)
 *   -ENOMEM write would exceed a pre-allocated buffer, or page
 *           reallocation failed
 *
 * fw_lock serializes against the loader finishing/aborting concurrently.
 */
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	/* Only privileged (raw-I/O capable) callers may load firmware. */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		/* No load in flight, or loading already finished. */
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		/* Pre-allocated destination: writes must stay inside it. */
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		/* Page-backed buffer: grow it on demand, then copy in. */
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	/* Track the high-water mark of bytes written so far. */
	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
|
Safe
|
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
|
7.89879698802044e+37
| 40 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <joe@perches.com>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
| 0 |
/*
 * Restore the previously saved input state: the saved cell attributes,
 * the saved origin-mode flag and the saved cursor position.
 */
input_restore_state(struct input_ctx *ictx)
{
	struct screen_write_ctx	*sctx = &ictx->ctx;

	/* Bring back the saved cell attributes. */
	memcpy(&ictx->cell, &ictx->old_cell, sizeof ictx->cell);

	/* Put origin mode back to its saved setting. */
	if ((ictx->old_mode & MODE_ORIGIN) == 0)
		screen_write_mode_clear(sctx, MODE_ORIGIN);
	else
		screen_write_mode_set(sctx, MODE_ORIGIN);

	/* And move the cursor back to where it was. */
	screen_write_cursormove(sctx, ictx->old_cx, ictx->old_cy, 0);
}
|
Safe
|
[
"CWE-787"
] |
tmux
|
a868bacb46e3c900530bed47a1c6f85b0fbe701c
|
2.0288325544830635e+38
| 11 |
Do not write after the end of the array and overwrite the stack when
colon-separated SGR sequences contain empty arguments. Reported by Sergey
Nizovtsev.
| 0 |
/*
 * Fill in the state/region field of the contact's work address from the
 * first parsed value, creating the work address on the contact if it does
 * not exist yet.
 */
work_state_populate (EContact *card,
                     gchar **values)
{
	EContactAddress *addr;

	addr = getormakeEContactAddress (card, E_CONTACT_ADDRESS_WORK);
	addr->region = g_strdup (values[0]);

	/* Store the updated address back on the card, then release our copy. */
	e_contact_set (card, E_CONTACT_ADDRESS_WORK, addr);
	e_contact_address_free (addr);
}
|
Safe
|
[] |
evolution-data-server
|
34bad61738e2127736947ac50e0c7969cc944972
|
2.2889157892726773e+38
| 8 |
Bug 796174 - strcat() considered unsafe for buffer overflow
| 0 |
/*
 * Serialize a PCM configuration ('pcmC') box: full-box header followed by
 * the format flags and the PCM sample size, one byte each.
 */
GF_Err pcmC_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_PCMConfigBox *ptr = (GF_PCMConfigBox *) s;
	GF_Err e;

	/* Version/flags header first. */
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;

	gf_bs_write_u8(bs, ptr->format_flags);
	gf_bs_write_u8(bs, ptr->PCM_sample_size);

	return GF_OK;
}
|
Safe
|
[
"CWE-787"
] |
gpac
|
77510778516803b7f7402d7423c6d6bef50254c3
|
4.333244297102992e+37
| 11 |
fixed #2255
| 0 |
/*
 * CPU hotplug notifier: rebuild scheduler domains when a CPU comes (back)
 * online. During suspend/resume, intermediate online events keep a single
 * flat sched domain; only the final one re-applies the cpuset config.
 * Returns NOTIFY_OK for handled actions, NOTIFY_DONE otherwise.
 */
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in suspend
		 * resume sequence. As long as this is not the last online
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
			partition_sched_domains(1, NULL, NULL);
			break;
		}

		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		/* deliberate fallthrough into the non-frozen cases below */

	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		cpuset_update_active_cpus(true);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
|
Safe
|
[
"CWE-200"
] |
linux
|
4efbc454ba68def5ef285b26ebfcfdb605b52755
|
4.887936604297711e+37
| 34 |
sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <raistlin@linux.it>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1392585857-10725-1-git-send-email-vegard.nossum@oracle.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
| 0 |
/*
 * Look up an IPv4 FIB table by id in this netns' table hash.
 * An id of 0 is treated as RT_TABLE_MAIN. Returns the table, or NULL if
 * no table with that id exists.
 *
 * Traverses the chain with hlist_for_each_entry_rcu();
 * NOTE(review): caller presumably holds rcu_read_lock() — confirm at
 * call sites.
 */
struct fib_table *fib_get_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	/* Hash the id into one of FIB_TABLE_HASHSZ buckets (power of two). */
	h = id & (FIB_TABLE_HASHSZ - 1);

	head = &net->ipv4.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, head, tb_hlist) {
		if (tb->tb_id == id)
			return tb;
	}
	return NULL;
}
|
Safe
|
[
"CWE-399"
] |
net-next
|
fbd40ea0180a2d328c5adc61414dc8bab9335ce2
|
5.379043265634394e+37
| 17 |
ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenerio we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <solar@openwall.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tested-by: Cyrill Gorcunov <gorcunov@openvz.org>
| 0 |
/*
 * Parse the children of an ISOBMFF sample table ('stbl') box, record some
 * bookkeeping counts, and verify the mandatory child boxes are present.
 * Returns GF_OK, a child-parsing error, or GF_ISOM_INVALID_FILE when a
 * mandatory box is missing or the sample counts are inconsistent.
 */
GF_Err stbl_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	//we need to parse DegPrior in a special way
	GF_SampleTableBox *ptr = (GF_SampleTableBox *)s;

	e = gf_isom_box_array_read(s, bs);
	if (e) return e;

	/* No 'stss' box means every sample is a sync sample. */
	if (!ptr->SyncSample)
		ptr->no_sync_found = 1;

	ptr->nb_sgpd_in_stbl = gf_list_count(ptr->sampleGroupsDescription);
	ptr->nb_stbl_boxes = gf_list_count(ptr->child_boxes);

	/* When cloning a track, skip the mandatory-box validation below. */
	if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_CLONE_TRACK)
		return GF_OK;
//	return GF_OK;

	//these boxes are mandatory !
	if (!ptr->SampleToChunk || !ptr->SampleSize || !ptr->ChunkOffset || !ptr->TimeToSample)
		return GF_ISOM_INVALID_FILE;
	//sanity check
	if (ptr->SampleSize->sampleCount) {
		if (!ptr->TimeToSample->nb_entries || !ptr->SampleToChunk->nb_entries)
			return GF_ISOM_INVALID_FILE;
	}
	return GF_OK;
}
|
Safe
|
[
"CWE-476",
"CWE-787"
] |
gpac
|
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
|
3.387237724311688e+38
| 29 |
fixed #1757
| 0 |
/*
 * tls1_process_ticket - look for a session-ticket extension in a
 * ClientHello and, when one is present, attempt to decrypt it.
 *
 * session_id/len locate the session ID inside the ClientHello; 'limit'
 * points one past the end of the message buffer.
 *
 * Return values:
 *   -1  fatal error (including a malformed/truncated ClientHello)
 *    0  no ticket / tickets disabled: stateful resumption may be used
 *    1  client supports tickets but sent an empty one: issue a new ticket
 *    2  ticket present but could not be decrypted (or deferred to the
 *       tls_session_secret_cb mechanism)
 *    3  ticket decrypted, *ret points at the session
 *
 * FIX (CWE-190 / CVE-2016-2177 class): all bounds checks are now written
 * as "limit - p" comparisons *before* advancing 'p'. The previous
 * "p += i; if (p >= limit)" pattern first computed a pointer that may lie
 * outside the buffer, which is undefined behaviour in C and can wrap. A
 * missing 2-byte availability check before reading the cipher-list length
 * (n2s) is also added, preventing a one-byte over-read on truncated input.
 */
int tls1_process_ticket(SSL *s, unsigned char *session_id, int len,
                        const unsigned char *limit, SSL_SESSION **ret)
{
    /* Point after session ID in client hello */
    const unsigned char *p = session_id + len;
    unsigned short i;

    *ret = NULL;
    s->tlsext_ticket_expected = 0;

    /*
     * If tickets disabled behave as if no ticket present to permit stateful
     * resumption.
     */
    if (SSL_get_options(s) & SSL_OP_NO_TICKET)
        return 0;
    if ((s->version <= SSL3_VERSION) || !limit)
        return 0;
    if (p >= limit)
        return -1;
    /* Skip past DTLS cookie (1-byte length prefix + body) */
    if (s->version == DTLS1_VERSION || s->version == DTLS1_BAD_VER) {
        i = *(p++);
        /* Need the cookie body plus at least one byte after it. */
        if (limit - p <= i)
            return -1;
        p += i;
    }
    /* Skip past cipher list (2-byte length prefix + body) */
    if (limit - p < 2)
        return -1;
    n2s(p, i);
    /* Need the cipher list plus at least one byte after it. */
    if (limit - p <= i)
        return -1;
    p += i;
    /* Skip past compression algorithm list (1-byte length prefix + body) */
    i = *(p++);
    if (limit - p < i)
        return -1;
    p += i;
    /* Now at start of extensions */
    if (limit - p <= 2)
        return 0;
    n2s(p, i);
    while (limit - p >= 4) {
        unsigned short type, size;
        n2s(p, type);
        n2s(p, size);
        if (limit - p < size)
            return 0;
        if (type == TLSEXT_TYPE_session_ticket) {
            int r;
            if (size == 0) {
                /*
                 * The client will accept a ticket but doesn't currently have
                 * one.
                 */
                s->tlsext_ticket_expected = 1;
                return 1;
            }
            if (s->tls_session_secret_cb) {
                /*
                 * Indicate that the ticket couldn't be decrypted rather than
                 * generating the session from ticket now, trigger
                 * abbreviated handshake based on external mechanism to
                 * calculate the master secret later.
                 */
                return 2;
            }
            r = tls_decrypt_ticket(s, p, size, session_id, len, ret);
            switch (r) {
            case 2:            /* ticket couldn't be decrypted */
                s->tlsext_ticket_expected = 1;
                return 2;
            case 3:            /* ticket was decrypted */
                return r;
            case 4:            /* ticket decrypted but need to renew */
                s->tlsext_ticket_expected = 1;
                return 3;
            default:           /* fatal error */
                return -1;
            }
        }
        p += size;
    }
    return 0;
}
|
Vulnerable
|
[
"CWE-190"
] |
openssl
|
6f35f6deb5ca7daebe289f86477e061ce3ee5f46
|
6.470835273834456e+37
| 84 |
Avoid some undefined pointer arithmetic
A common idiom in the codebase is:
if (p + len > limit)
{
return; /* Too long */
}
Where "p" points to some malloc'd data of SIZE bytes and
limit == p + SIZE
"len" here could be from some externally supplied data (e.g. from a TLS
message).
The rules of C pointer arithmetic are such that "p + len" is only well
defined where len <= SIZE. Therefore the above idiom is actually
undefined behaviour.
For example this could cause problems if some malloc implementation
provides an address for "p" such that "p + len" actually overflows for
values of len that are too big and therefore p + len < limit!
Issue reported by Guido Vranken.
CVE-2016-2177
Reviewed-by: Rich Salz <rsalz@openssl.org>
| 1 |
/*
 * Tokenizer for the regexp compiler: consume input via FETCH/FETCH_WC and
 * return the next token, honouring the active syntax_bits dialect.
 * Side effects: updates the lexer globals (lexptr, lexleft, lasttok,
 * laststart, parens, minrep, maxrep, wctok) and may call dfaerror() on
 * malformed input.
 */
lex (void)
{
  unsigned int c, c2;
  int backslash = 0;
  charclass ccl;
  int i;

  /* Basic plan: We fetch a character.  If it's a backslash,
     we set the backslash flag and go through the loop again.
     On the plus side, this avoids having a duplicate of the
     main switch inside the backslash case.  On the minus side,
     it means that just about every case begins with
     "if (backslash) ...".  */
  for (i = 0; i < 2; ++i)
    {
      if (MB_CUR_MAX > 1)
        {
          /* Multibyte locale: fetch a whole (wide) character into wctok. */
          FETCH_WC (c, wctok, NULL);
          if ((int)c == EOF)
            goto normal_char;
        }
      else
        FETCH(c, NULL);
      switch (c)
        {
        case '\\':
          if (backslash)
            goto normal_char;
          if (lexleft == 0)
            dfaerror(_("unfinished \\ escape"));
          backslash = 1;
          break;

        case '^':
          if (backslash)
            goto normal_char;
          if (syntax_bits & RE_CONTEXT_INDEP_ANCHORS
              || lasttok == END
              || lasttok == LPAREN
              || lasttok == OR)
            return lasttok = BEGLINE;
          goto normal_char;

        case '$':
          if (backslash)
            goto normal_char;
          /* '$' anchors only in contexts where an end-of-line can occur:
             end of pattern, before ')', before '|', or before an
             alternation newline, depending on the syntax bits.  */
          if (syntax_bits & RE_CONTEXT_INDEP_ANCHORS
              || lexleft == 0
              || (syntax_bits & RE_NO_BK_PARENS
                  ? lexleft > 0 && *lexptr == ')'
                  : lexleft > 1 && lexptr[0] == '\\' && lexptr[1] == ')')
              || (syntax_bits & RE_NO_BK_VBAR
                  ? lexleft > 0 && *lexptr == '|'
                  : lexleft > 1 && lexptr[0] == '\\' && lexptr[1] == '|')
              || ((syntax_bits & RE_NEWLINE_ALT)
                  && lexleft > 0 && *lexptr == '\n'))
            return lasttok = ENDLINE;
          goto normal_char;

        case '1':
        case '2':
        case '3':
        case '4':
        case '5':
        case '6':
        case '7':
        case '8':
        case '9':
          if (backslash && !(syntax_bits & RE_NO_BK_REFS))
            {
              laststart = 0;
              return lasttok = BACKREF;
            }
          goto normal_char;

        case '`':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = BEGLINE; /* FIXME: should be beginning of string */
          goto normal_char;

        case '\'':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = ENDLINE; /* FIXME: should be end of string */
          goto normal_char;

        case '<':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = BEGWORD;
          goto normal_char;

        case '>':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = ENDWORD;
          goto normal_char;

        case 'b':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = LIMWORD;
          goto normal_char;

        case 'B':
          if (backslash && !(syntax_bits & RE_NO_GNU_OPS))
            return lasttok = NOTLIMWORD;
          goto normal_char;

        case '?':
          if (syntax_bits & RE_LIMITED_OPS)
            goto normal_char;
          if (backslash != ((syntax_bits & RE_BK_PLUS_QM) != 0))
            goto normal_char;
          if (!(syntax_bits & RE_CONTEXT_INDEP_OPS) && laststart)
            goto normal_char;
          return lasttok = QMARK;

        case '*':
          if (backslash)
            goto normal_char;
          if (!(syntax_bits & RE_CONTEXT_INDEP_OPS) && laststart)
            goto normal_char;
          return lasttok = STAR;

        case '+':
          if (syntax_bits & RE_LIMITED_OPS)
            goto normal_char;
          if (backslash != ((syntax_bits & RE_BK_PLUS_QM) != 0))
            goto normal_char;
          if (!(syntax_bits & RE_CONTEXT_INDEP_OPS) && laststart)
            goto normal_char;
          return lasttok = PLUS;

        case '{':
          if (!(syntax_bits & RE_INTERVALS))
            goto normal_char;
          if (backslash != ((syntax_bits & RE_NO_BK_BRACES) == 0))
            goto normal_char;
          if (!(syntax_bits & RE_CONTEXT_INDEP_OPS) && laststart)
            goto normal_char;

          if (syntax_bits & RE_NO_BK_BRACES)
            {
              /* Scan ahead for a valid interval; if it's not valid,
                 treat it as a literal '{'.  */
              int lo = -1, hi = -1;
              char const *p = lexptr;
              char const *lim = p + lexleft;
              for (; p != lim && ISASCIIDIGIT (*p); p++)
                {
                  if (lo < 0)
                    lo = *p - '0';
                  else
                    {
                      lo = lo * 10 + *p - '0';
                      if (RE_DUP_MAX < lo)
                        goto normal_char;
                    }
                }
              if (p != lim && *p == ',')
                while (++p != lim && ISASCIIDIGIT (*p))
                  {
                    if (hi < 0)
                      hi = *p - '0';
                    else
                      {
                        hi = hi * 10 + *p - '0';
                        if (RE_DUP_MAX < hi)
                          goto normal_char;
                      }
                  }
              else
                hi = lo;
              if (p == lim || *p != '}'
                  || lo < 0 || (0 <= hi && hi < lo))
                goto normal_char;
            }

          minrep = 0;
          /* Cases:
             {M} - exact count
             {M,} - minimum count, maximum is infinity
             {M,N} - M through N */
          FETCH(c, _("unfinished repeat count"));
          if (ISASCIIDIGIT (c))
            {
              minrep = c - '0';
              for (;;)
                {
                  FETCH(c, _("unfinished repeat count"));
                  if (! ISASCIIDIGIT (c))
                    break;
                  minrep = 10 * minrep + c - '0';
                }
            }
          else
            dfaerror(_("malformed repeat count"));
          if (c == ',')
            {
              FETCH (c, _("unfinished repeat count"));
              if (! ISASCIIDIGIT (c))
                maxrep = -1;
              else
                {
                  maxrep = c - '0';
                  for (;;)
                    {
                      FETCH (c, _("unfinished repeat count"));
                      if (! ISASCIIDIGIT (c))
                        break;
                      maxrep = 10 * maxrep + c - '0';
                    }
                  if (0 <= maxrep && maxrep < minrep)
                    dfaerror (_("malformed repeat count"));
                }
            }
          else
            maxrep = minrep;
          if (!(syntax_bits & RE_NO_BK_BRACES))
            {
              /* BRE dialect: the closing brace is written "\}". */
              if (c != '\\')
                dfaerror(_("malformed repeat count"));
              FETCH(c, _("unfinished repeat count"));
            }
          if (c != '}')
            dfaerror(_("malformed repeat count"));
          laststart = 0;
          return lasttok = REPMN;

        case '|':
          if (syntax_bits & RE_LIMITED_OPS)
            goto normal_char;
          if (backslash != ((syntax_bits & RE_NO_BK_VBAR) == 0))
            goto normal_char;
          laststart = 1;
          return lasttok = OR;

        case '\n':
          if (syntax_bits & RE_LIMITED_OPS
              || backslash
              || !(syntax_bits & RE_NEWLINE_ALT))
            goto normal_char;
          laststart = 1;
          return lasttok = OR;

        case '(':
          if (backslash != ((syntax_bits & RE_NO_BK_PARENS) == 0))
            goto normal_char;
          ++parens;
          laststart = 1;
          return lasttok = LPAREN;

        case ')':
          if (backslash != ((syntax_bits & RE_NO_BK_PARENS) == 0))
            goto normal_char;
          if (parens == 0 && syntax_bits & RE_UNMATCHED_RIGHT_PAREN_ORD)
            goto normal_char;
          --parens;
          laststart = 0;
          return lasttok = RPAREN;

        case '.':
          if (backslash)
            goto normal_char;
          if (MB_CUR_MAX > 1)
            {
              /* In multibyte environment period must match with a single
                 character not a byte.  So we use ANYCHAR.  */
              laststart = 0;
              return lasttok = ANYCHAR;
            }
          zeroset(ccl);
          notset(ccl);
          if (!(syntax_bits & RE_DOT_NEWLINE))
            clrbit(eolbyte, ccl);
          if (syntax_bits & RE_DOT_NOT_NULL)
            clrbit('\0', ccl);
          laststart = 0;
          return lasttok = CSET + charclass_index(ccl);

        case 's':
        case 'S':
          if (!backslash || (syntax_bits & RE_NO_GNU_OPS))
            goto normal_char;
          /* \s / \S: class of (non-)whitespace bytes. */
          zeroset(ccl);
          for (c2 = 0; c2 < NOTCHAR; ++c2)
            if (isspace(c2))
              setbit(c2, ccl);
          if (c == 'S')
            notset(ccl);
          laststart = 0;
          return lasttok = CSET + charclass_index(ccl);

        case 'w':
        case 'W':
          if (!backslash || (syntax_bits & RE_NO_GNU_OPS))
            goto normal_char;
          /* \w / \W: class of (non-)word-constituent bytes. */
          zeroset(ccl);
          for (c2 = 0; c2 < NOTCHAR; ++c2)
            if (IS_WORD_CONSTITUENT(c2))
              setbit(c2, ccl);
          if (c == 'W')
            notset(ccl);
          laststart = 0;
          return lasttok = CSET + charclass_index(ccl);

        case '[':
          if (backslash)
            goto normal_char;
          laststart = 0;
          return lasttok = parse_bracket_exp();

        default:
        normal_char:
          laststart = 0;
          /* For multibyte character sets, folding is done in atom.  Always
             return WCHAR.  */
          if (MB_CUR_MAX > 1)
            return lasttok = WCHAR;

          if (case_fold && isalpha(c))
            {
              zeroset(ccl);
              setbit_case_fold_c (c, ccl);
              return lasttok = CSET + charclass_index(ccl);
            }

          return lasttok = c;
        }
    }

  /* The above loop should consume at most a backslash
     and some other character.  */
  abort();
  return END;                   /* keeps pedantic compilers happy.  */
}
|
Safe
|
[
"CWE-189"
] |
grep
|
cbbc1a45b9f843c811905c97c90a5d31f8e6c189
|
1.0931701108127293e+38
| 334 |
grep: fix some core dumps with long lines etc.
These problems mostly occur because the code attempts to stuff
sizes into int or into unsigned int; this doesn't work on most
64-bit hosts and the errors can lead to core dumps.
* NEWS: Document this.
* src/dfa.c (token): Typedef to ptrdiff_t, since the enum's
range could be as small as -128 .. 127 on practical hosts.
(position.index): Now size_t, not unsigned int.
(leaf_set.elems): Now size_t *, not unsigned int *.
(dfa_state.hash, struct mb_char_classes.nchars, .nch_classes)
(.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex)
(.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets):
(.mbcsets_alloc): Now size_t, not int.
(dfa_state.first_end): Now token, not int.
(state_num): New type.
(struct mb_char_classes.cset): Now ptrdiff_t, not int.
(struct dfa.utf8_anychar_classes): Now token[5], not int[5].
(struct dfa.sindex, .salloc, .tralloc): Now state_num, not int.
(struct dfa.trans, .realtrans, .fails): Now state_num **, not int **.
(struct dfa.newlines): Now state_num *, not int *.
(prtok): Don't assume 'token' is no wider than int.
(lexleft, parens, depth): Now size_t, not int.
(charclass_index, nsubtoks)
(parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete)
(state_index, epsclosure, state_separate_contexts)
(dfaanalyze, dfastate, build_state, realloc_trans_if_necessary)
(transit_state_singlebyte, match_anychar, match_mb_charset)
(check_matching_with_multibyte_ops, transit_state_consume_1char)
(transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree)
(freelist, enlist, addlists, inboth, dfamust):
Don't assume indexes fit in 'int'.
(lex): Avoid overflow in string-to-{hi,lo} conversions.
(dfaanalyze): Redo indexing so that it works with size_t values,
which cannot go negative.
* src/dfa.h (dfaexec): Count argument is now size_t *, not int *.
(dfastate): State numbers are now ptrdiff_t, not int.
* src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM.
(kwset_exact_matches): Now size_t, not int.
(EGexecute): Don't assume indexes fit in 'int'.
Check for overflow before converting a ptrdiff_t to a regoff_t,
as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX).
Check for memory exhaustion in re_search rather than treating
it merely as failure to match; use xalloc_die () to report any error.
* src/kwset.c (struct trie.accepting): Now size_t, not unsigned int.
(struct kwset.words): Now ptrdiff_t, not int.
* src/kwset.h (struct kwsmatch.index): Now size_t, not int.
| 0 |
/*
 * Handle a termcap query ("XT") response received from the terminal:
 * "1+r<hexname>=<hexvalue>". Decodes the two-character name and the hex
 * payload, then either updates the color count (name "Co") or replaces
 * the stored termcode for that key. Always advances xt_index_in and may
 * request further codes.
 */
    got_code_from_term(char_u *code, int len)
{
#define XT_LEN 100
    char_u	name[3];
    char_u	str[XT_LEN];
    int		i;
    int		j = 0;
    int		c;

    // A '1' means the code is supported, a '0' means it isn't.
    // When half the length is > XT_LEN we can't use it.
    // Our names are currently all 2 characters.
    if (code[0] == '1' && code[7] == '=' && len / 2 < XT_LEN)
    {
	// Get the name from the response and find it in the table.
	name[0] = hexhex2nr(code + 3);
	name[1] = hexhex2nr(code + 5);
	name[2] = NUL;
	for (i = 0; key_names[i] != NULL; ++i)
	{
	    if (STRCMP(key_names[i], name) == 0)
	    {
		xt_index_in = i;
		break;
	    }
	}

	LOG_TR(("Received XT %d: %s", xt_index_in, (char *)name));

	if (key_names[i] != NULL)
	{
	    // Decode the hex payload into str[]; the len/2 < XT_LEN check
	    // above guarantees it fits.
	    for (i = 8; (c = hexhex2nr(code + i)) >= 0; i += 2)
		str[j++] = c;
	    str[j] = NUL;

	    if (name[0] == 'C' && name[1] == 'o')
	    {
		// Color count is not a key code.
		may_adjust_color_count(atoi((char *)str));
	    }
	    else
	    {
		// First delete any existing entry with the same code.
		i = find_term_bykeys(str);
		if (i >= 0)
		    del_termcode_idx(i);
		add_termcode(name, str, ATC_FROM_TERM);
	    }
	}
    }

    // May request more codes now that we received one.
    ++xt_index_in;
    req_more_codes_from_term();
}
|
Safe
|
[
"CWE-125",
"CWE-787"
] |
vim
|
e178af5a586ea023622d460779fdcabbbfac0908
|
5.011485390506441e+37
| 54 |
patch 8.2.5160: accessing invalid memory after changing terminal size
Problem: Accessing invalid memory after changing terminal size.
Solution: Adjust cmdline_row and msg_row to the value of Rows.
| 0 |
/*
 * vty command "neighbor <peer> unsuppress-map WORD": attach a route-map
 * that selectively re-advertises routes otherwise suppressed for this
 * peer, in the current vty node's AFI/SAFI.
 * argv[0] = peer address/tag, argv[1] = route-map name.
 */
DEFUN (neighbor_unsuppress_map,
       neighbor_unsuppress_map_cmd,
       NEIGHBOR_CMD2 "unsuppress-map WORD",
       NEIGHBOR_STR
       NEIGHBOR_ADDR_STR2
       "Route-map to selectively unsuppress suppressed routes\n"
       "Name of route map\n")
{
  return peer_unsuppress_map_set_vty (vty, argv[0], bgp_node_afi (vty),
				      bgp_node_safi (vty), argv[1]);
}
|
Safe
|
[
"CWE-125"
] |
frr
|
6d58272b4cf96f0daa846210dd2104877900f921
|
5.0466558449272695e+37
| 11 |
[bgpd] cleanup, compact and consolidate capability parsing code
2007-07-26 Paul Jakma <paul.jakma@sun.com>
* (general) Clean up and compact capability parsing slightly.
Consolidate validation of length and logging of generic TLV, and
memcpy of capability data, thus removing such from cap specifc
code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header
and for the data formats of the various specific capabilities we
support. Hence remove the badly named, or else misdefined, struct
capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data.
Do the length checks *before* memcpy()'ing based on that length
(stored capability - should have been validated anyway on input,
but..).
(bgp_afi_safi_valid_indices) new function to validate (afi,safi)
which is about to be used as index into arrays, consolidates
several instances of same, at least one of which appeared to be
incomplete..
(bgp_capability_mp) Much condensed.
(bgp_capability_orf_entry) New, process one ORF entry
(bgp_capability_orf) Condensed. Fixed to process all ORF entries.
(bgp_capability_restart) Condensed, and fixed to use a
cap-specific type, rather than abusing capability_mp.
(struct message capcode_str) added to aid generic logging.
(size_t cap_minsizes[]) added to aid generic validation of
capability length field.
(bgp_capability_parse) Generic logging and validation of TLV
consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use
streams, so no more need here to manually fudge the input stream
getp.
(bgp_capability_msg_parse) use struct capability_mp_data. Validate
lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
(bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
(bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in
same-number array slot should be debug, not warning. E.g. BGP
has several discontigious number spaces, allocating from
different parts of a space is not uncommon (e.g. IANA
assigned versus vendor-assigned code points in some number
space).
| 0 |
/*
 * Finish processing a </select> tag: flush the pending option, close the
 * rendered representation and reset the per-select state.
 * Returns the accumulated markup string, or NULL when no <select> is
 * currently open.
 */
process_n_select(void)
{
    if (cur_select == NULL)
	return NULL;
    process_option();
#ifdef MENU_SELECT
    if (!select_is_multiple) {
	/* Single-choice select rendered as a popup-menu text field. */
	if (select_option[n_select].first) {
	    FormItemList sitem;
	    chooseSelectOption(&sitem, select_option[n_select].first);
	    Strcat(select_str, textfieldrep(sitem.label, cur_option_maxwidth));
	}
	Strcat_charp(select_str, "</input_alt>]</pre_int>");
	n_select++;
    }
    else
#endif				/* MENU_SELECT */
	Strcat_charp(select_str, "<br>");
    cur_select = NULL;
    n_selectitem = 0;
    return select_str;
}
|
Safe
|
[
"CWE-476"
] |
w3m
|
59b91cd8e30c86f23476fa81ae005cabff49ebb6
|
2.1847126818263734e+37
| 22 |
Prevent segfault with malformed input type
Bug-Debian: https://github.com/tats/w3m/issues/7
| 0 |
/*
 * Queue a CPL_TID_RELEASE work request so the hardware TID 'hwtid' is
 * returned to the adapter. Silently does nothing if no skb can be
 * obtained (best effort, matching the original behaviour).
 */
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	/* Reuse the caller's skb when possible, else allocate a new one. */
	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;

	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));

	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
}
|
Safe
|
[
"CWE-703"
] |
linux
|
67f1aee6f45059fd6b0f5b0ecb2c97ad0451f6b3
|
2.6163423254773034e+38
| 14 |
iw_cxgb3: Fix incorrectly returning error on success
The cxgb3_*_send() functions return NET_XMIT_ values, which are
positive integers values. So don't treat positive return values
as an error.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
| 0 |
  // Constructor only: capture the OSDService, the list of pinned OSDMap
  // references (kept alive until the callback fires) and the epoch this
  // apply refers to. The actual work happens in the enclosing Context's
  // finish method (not shown here).
  C_OnMapApply(OSDService *service,
	       const list<OSDMapRef> &pinned_maps,
	       epoch_t e)
    : service(service), pinned_maps(pinned_maps), e(e) {}
|
Safe
|
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
|
2.940242943078327e+38
| 4 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <sage@redhat.com>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
| 0 |
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * A pmd maps a transparent huge page iff its PSE (page-size) bit
	 * is set; return the raw masked bit (nonzero == huge), exactly as
	 * callers that only boolean-test it expect.
	 */
	unsigned long entry = pmd_val(pmd);

	return entry & _PAGE_PSE;
}
|
Safe
|
[
"CWE-119",
"CWE-787"
] |
linux
|
027ef6c87853b0a9df53175063028edb4950d476
|
3.1869689992303962e+38
| 4 |
mm: thp: fix pmd_present for split_huge_page and PROT_NONE with THP
In many places !pmd_present has been converted to pmd_none. For pmds
that's equivalent and pmd_none is quicker so using pmd_none is better.
However (unless we delete pmd_present) we should provide an accurate
pmd_present too. This will avoid the risk of code thinking the pmd is non
present because it's under __split_huge_page_map, see the pmd_mknotpresent
there and the comment above it.
If the page has been mprotected as PROT_NONE, it would also lead to a
pmd_present false negative in the same way as the race with
split_huge_page.
Because the PSE bit stays on at all times (both during split_huge_page and
when the _PAGE_PROTNONE bit get set), we could only check for the PSE bit,
but checking the PROTNONE bit too is still good to remember pmd_present
must always keep PROT_NONE into account.
This explains a not reproducible BUG_ON that was seldom reported on the
lists.
The same issue is in pmd_large, it would go wrong with both PROT_NONE and
if it races with split_huge_page.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| 0 |
open_archive (char *file)
{
  /* Open the archive FILE (possibly on a remote tape via rmtopen) with a
     mode chosen from the operation being performed; returns the descriptor
     from rmtopen (-1 on failure).  */
  void (*copy_in) ();		/* Workaround for pcc bug. */
  int flags;

  copy_in = process_copy_in;

  if (copy_function == copy_in)
    flags = O_RDONLY | O_BINARY;		/* extracting: read only */
  else if (append_flag)
    flags = O_RDWR | O_BINARY;			/* appending: keep contents */
  else
    flags = O_WRONLY | O_CREAT | O_TRUNC | O_BINARY;	/* creating anew */

  return rmtopen (file, flags, MODE_RW, rsh_command_option);
}
|
Safe
|
[
"CWE-190"
] |
cpio
|
dd96882877721703e19272fe25034560b794061b
|
1.8362554713698566e+38
| 20 |
Rewrite dynamic string support.
* src/dstring.c (ds_init): Take a single argument.
(ds_free): New function.
(ds_resize): Take a single argument. Use x2nrealloc to expand
the storage.
(ds_reset,ds_append,ds_concat,ds_endswith): New function.
(ds_fgetstr): Rewrite. In particular, this fixes integer overflow.
* src/dstring.h (dynamic_string): Keep both the allocated length
(ds_size) and index of the next free byte in the string (ds_idx).
(ds_init,ds_resize): Change signature.
(ds_len): New macro.
(ds_free,ds_reset,ds_append,ds_concat,ds_endswith): New protos.
* src/copyin.c: Use new ds_ functions.
* src/copyout.c: Likewise.
* src/copypass.c: Likewise.
* src/util.c: Likewise.
| 0 |
/* Handler run when a client connects to a user session.  Ignores sessions
 * without a seat0 display, stops Plymouth if it is still running, and --
 * when the display allows it and timed login is configured -- arms the
 * timed-login details on the session and starts the "gdm-autologin"
 * conversation.
 *
 * session:       the GdmSession the client connected to
 * credentials:   client credentials (unused here)
 * pid_of_client: client pid (unused here)
 * manager:       owning GdmManager
 */
on_session_client_connected (GdmSession *session,
                             GCredentials *credentials,
                             GPid pid_of_client,
                             GdmManager *manager)
{
        GdmDisplay *display;
        char *username;
        int delay;
        gboolean enabled;
        gboolean allow_timed_login = FALSE;
        g_debug ("GdmManager: client connected");
        display = get_display_for_user_session (session);
        /* Nothing to do for sessions with no display or off the primary seat. */
        if (display == NULL) {
                return;
        }
        if (!display_is_on_seat0 (display)) {
                return;
        }
#ifdef WITH_PLYMOUTH
        /* A client showed up, so the boot splash must give way to it. */
        if (manager->priv->plymouth_is_running) {
                plymouth_quit_with_transition ();
                manager->priv->plymouth_is_running = FALSE;
        }
#endif
        /* Timed login is gated per-display via a GObject property. */
        g_object_get (G_OBJECT (display), "allow-timed-login", &allow_timed_login, NULL);
        if (!allow_timed_login) {
                return;
        }
        enabled = get_timed_login_details (manager, &username, &delay);
        /* NOTE(review): on the !enabled path, username is assumed not to
         * have been allocated by get_timed_login_details -- confirm, else
         * this early return would leak it. */
        if (! enabled) {
                return;
        }
        gdm_session_set_timed_login_details (session, username, delay);
        g_debug ("GdmManager: Starting automatic login conversation (for timed login)");
        gdm_session_start_conversation (session, "gdm-autologin");
        g_free (username);
}
|
Safe
|
[] |
gdm
|
ff98b2817014684ae1acec78ff06f0f461a56a9f
|
2.509767511254559e+38
| 50 |
manager: if falling back to X11 retry autologin
Right now, we get one shot to autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520
| 0 |
static void makedirs(const char* name)
{
    /* Recursively create NAME and any missing parent directories
     * (mkdir -p style).  Failures other than EEXIST are only logged;
     * errno is cleared on exit so callers see a clean state. */
    const char* slash = strrchr(name, '/');
    if (slash)
    {
        /* Ensure the parent path exists before attempting NAME itself. */
        char* parent = _zzip_strndup(name, slash - name);
        makedirs(parent);
        free (parent);
    }
    if (_zzip_mkdir(name, 0775) == -1 && errno != EEXIST)
    {
        DBG3("while mkdir %s : %s", name, strerror(errno));
    }
    errno = 0;
}
|
Safe
|
[
"CWE-772"
] |
zziplib
|
83a2da55922f67e07f22048ac9671a44cc0d35c4
|
3.5788514331819323e+37
| 14 |
ensure disk_close to avoid mem-leak #40
| 0 |
CModule::EModRet CModule::OnUserCTCPReplyMessage(CCTCPMessage& Message) {
    // Compatibility shim: route the structured CTCP-reply message through
    // the legacy string-based OnUserCTCPReply() hook.
    CString sTarget = Message.GetTarget();
    CString sText = Message.GetText();
    const EModRet eRet = OnUserCTCPReply(sTarget, sText);
    // The legacy hook may have rewritten either string; push any edits
    // back into the message before returning its verdict.
    Message.SetTarget(sTarget);
    Message.SetText(sText);
    return eRet;
}
|
Safe
|
[
"CWE-20",
"CWE-264"
] |
znc
|
8de9e376ce531fe7f3c8b0aa4876d15b479b7311
|
2.6626434197959287e+38
| 8 |
Fix remote code execution and privilege escalation vulnerability.
To trigger this, need to have a user already.
Thanks for Jeriko One <jeriko.one@gmx.us> for finding and reporting this.
CVE-2019-12816
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.