Dataset schema (one record per row, fields in the order shown below):
CVE ID: string, 13 to 43 characters
CVE Page: string, 45 to 48 characters
CWE ID: string, 90 distinct values
codeLink: string, 46 to 139 characters
commit_id: string, 6 to 81 characters
commit_message: string, 3 to 13.3k characters
func_after: string, 14 to 241k characters
func_before: string, 14 to 241k characters
lang: string, 3 distinct values
project: string, 309 distinct values
vul: int8, values 0 or 1
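Each record below pairs a fixing commit with a function body before and after the change, plus the vul flag. As a minimal sketch of consuming this schema, the following assumes the rows have been exported as JSON Lines with the field names listed above; the file name "bigvul_sample.jsonl" is hypothetical and not part of this dataset.

    import json

    # Minimal sketch: stream records from a hypothetical JSON Lines export
    # whose keys match the column names listed above.
    def load_records(path):
        with open(path, "r", encoding="utf-8") as fh:
            for line in fh:
                line = line.strip()
                if line:
                    yield json.loads(line)

    if __name__ == "__main__":
        # 'vul' is an int8 flag (0 or 1); keep only the flagged records.
        for rec in load_records("bigvul_sample.jsonl"):
            if rec.get("vul") == 1:
                print(rec.get("CVE ID"), rec.get("project"), rec.get("lang"))
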
CVE-2017-8422
https://www.cvedetails.com/cve/CVE-2017-8422/
CWE-290
https://cgit.kde.org/kauth.git/commit/?id=df875f725293af53399f5146362eb158b4f9216a
df875f725293af53399f5146362eb158b4f9216a
null
void Polkit1Backend::preAuthAction(const QString &action, QWidget *parent) { if (!parent) { qCDebug(KAUTH) << "Parent widget does not exist, skipping"; return; } if (QDBusConnection::sessionBus().interface()->isServiceRegistered(QLatin1String("org.kde.polkit-kde-authentication-agent-1"))) { if (qApp == 0 || !qobject_cast<QApplication *>(qApp)) { qCDebug(KAUTH) << "Not streaming parent as we are on a TTY application"; } qulonglong wId = parent->effectiveWinId(); QDBusMessage methodCall = QDBusMessage::createMethodCall(QLatin1String("org.kde.polkit-kde-authentication-agent-1"), QLatin1String("/org/kde/Polkit1AuthAgent"), QLatin1String("org.kde.Polkit1AuthAgent"), QLatin1String("setWIdForAction")); methodCall << action; methodCall << wId; QDBusPendingCall call = QDBusConnection::sessionBus().asyncCall(methodCall); call.waitForFinished(); if (call.isError()) { qCWarning(KAUTH) << "ERROR while streaming the parent!!" << call.error(); } } else { qCDebug(KAUTH) << "KDE polkit agent appears too old or not registered on the bus"; } }
void Polkit1Backend::preAuthAction(const QString &action, QWidget *parent) { if (!parent) { qCDebug(KAUTH) << "Parent widget does not exist, skipping"; return; } if (QDBusConnection::sessionBus().interface()->isServiceRegistered(QLatin1String("org.kde.polkit-kde-authentication-agent-1"))) { if (qApp == 0 || !qobject_cast<QApplication *>(qApp)) { qCDebug(KAUTH) << "Not streaming parent as we are on a TTY application"; } qulonglong wId = parent->effectiveWinId(); QDBusMessage methodCall = QDBusMessage::createMethodCall(QLatin1String("org.kde.polkit-kde-authentication-agent-1"), QLatin1String("/org/kde/Polkit1AuthAgent"), QLatin1String("org.kde.Polkit1AuthAgent"), QLatin1String("setWIdForAction")); methodCall << action; methodCall << wId; QDBusPendingCall call = QDBusConnection::sessionBus().asyncCall(methodCall); call.waitForFinished(); if (call.isError()) { qCWarning(KAUTH) << "ERROR while streaming the parent!!" << call.error(); } } else { qCDebug(KAUTH) << "KDE polkit agent appears too old or not registered on the bus"; } }
CPP
kde
0
CVE-2019-3817
https://www.cvedetails.com/cve/CVE-2019-3817/
CWE-416
https://github.com/rpm-software-management/libcomps/commit/e3a5d056633677959ad924a51758876d415e7046
e3a5d056633677959ad924a51758876d415e7046
Fix UAF in comps_objmrtree_unite function The added field is not used at all in many places and it is probably the left-over of some copy-paste.
signed char comps_objmrtree_cmp(COMPS_ObjMRTree *ort1, COMPS_ObjMRTree *ort2) { COMPS_HSList *values1, *values2; COMPS_HSListItem *it; COMPS_Set *set1, *set2; signed char ret; values1 = comps_objmrtree_pairs(ort1); values2 = comps_objmrtree_pairs(ort2); set1 = comps_set_create(); comps_set_init(set1, NULL, NULL, NULL, &comps_objmrtree_paircmp); set2 = comps_set_create(); comps_set_init(set2, NULL, NULL, NULL, &comps_objmrtree_paircmp); for (it = values1->first; it != NULL; it = it->next) { comps_set_add(set1, it->data); } for (it = values2->first; it != NULL; it = it->next) { comps_set_add(set2, it->data); } ret = comps_set_cmp(set1, set2); comps_set_destroy(&set1); comps_set_destroy(&set2); comps_hslist_destroy(&values1); comps_hslist_destroy(&values2); return !ret; }
signed char comps_objmrtree_cmp(COMPS_ObjMRTree *ort1, COMPS_ObjMRTree *ort2) { COMPS_HSList *values1, *values2; COMPS_HSListItem *it; COMPS_Set *set1, *set2; signed char ret; values1 = comps_objmrtree_pairs(ort1); values2 = comps_objmrtree_pairs(ort2); set1 = comps_set_create(); comps_set_init(set1, NULL, NULL, NULL, &comps_objmrtree_paircmp); set2 = comps_set_create(); comps_set_init(set2, NULL, NULL, NULL, &comps_objmrtree_paircmp); for (it = values1->first; it != NULL; it = it->next) { comps_set_add(set1, it->data); } for (it = values2->first; it != NULL; it = it->next) { comps_set_add(set2, it->data); } ret = comps_set_cmp(set1, set2); comps_set_destroy(&set1); comps_set_destroy(&set2); comps_hslist_destroy(&values1); comps_hslist_destroy(&values2); return !ret; }
C
libcomps
0
CVE-2011-2790
https://www.cvedetails.com/cve/CVE-2011-2790/
CWE-399
https://github.com/chromium/chromium/commit/adb3498ca0b69561d8c6b60bab641de4b0e37dbf
adb3498ca0b69561d8c6b60bab641de4b0e37dbf
Reviewed by Kevin Ollivier. [wx] Fix strokeArc and fillRoundedRect drawing, and add clipPath support. https://bugs.webkit.org/show_bug.cgi?id=60847 git-svn-id: svn://svn.chromium.org/blink/trunk@86502 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void GraphicsContext::platformInit(PlatformGraphicsContext* context) { m_data = new GraphicsContextPlatformPrivate; setPaintingDisabled(!context); if (context) { setPlatformFillColor(fillColor(), ColorSpaceDeviceRGB); setPlatformStrokeColor(strokeColor(), ColorSpaceDeviceRGB); } #if USE(WXGC) m_data->context = (wxGCDC*)context; #else m_data->context = (wxWindowDC*)context; #endif }
void GraphicsContext::platformInit(PlatformGraphicsContext* context) { m_data = new GraphicsContextPlatformPrivate; setPaintingDisabled(!context); if (context) { setPlatformFillColor(fillColor(), ColorSpaceDeviceRGB); setPlatformStrokeColor(strokeColor(), ColorSpaceDeviceRGB); } #if USE(WXGC) m_data->context = (wxGCDC*)context; #else m_data->context = (wxWindowDC*)context; #endif }
C
Chrome
0
CVE-2017-6991
https://www.cvedetails.com/cve/CVE-2017-6991/
CWE-119
https://github.com/chromium/chromium/commit/3bfe67c9c4b45eb713326aae7a67c8f7390dae08
3bfe67c9c4b45eb713326aae7a67c8f7390dae08
sqlite: safely move pointer values through SQL. This lands https://www.sqlite.org/src/timeline?c=d6a44b35 in third_party/sqlite/src/ and third_party/sqlite/patches/0013-Add-new-interfaces-sqlite3_bind_pointer-sqlite3_resu.patch and re-generates third_party/sqlite/amalgamation/* using the script at third_party/sqlite/google_generate_amalgamation.sh. The CL also adds a layout test that verifies the patch works as intended. BUG=742407 Change-Id: I2e1a457459cd2e975e6241b630e7b79c82545981 Reviewed-on: https://chromium-review.googlesource.com/572976 Reviewed-by: Chris Mumford <cmumford@chromium.org> Commit-Queue: Victor Costan <pwnall@chromium.org> Cr-Commit-Position: refs/heads/master@{#487275}
static int copyPayload( void *pPayload, /* Pointer to page data */ void *pBuf, /* Pointer to buffer */ int nByte, /* Number of bytes to copy */ int eOp, /* 0 -> copy from page, 1 -> copy to page */ DbPage *pDbPage /* Page containing pPayload */ ){ if( eOp ){ /* Copy data from buffer to page (a write operation) */ int rc = sqlite3PagerWrite(pDbPage); if( rc!=SQLITE_OK ){ return rc; } memcpy(pPayload, pBuf, nByte); }else{ /* Copy data from page to buffer (a read operation) */ memcpy(pBuf, pPayload, nByte); } return SQLITE_OK; }
static int copyPayload( void *pPayload, /* Pointer to page data */ void *pBuf, /* Pointer to buffer */ int nByte, /* Number of bytes to copy */ int eOp, /* 0 -> copy from page, 1 -> copy to page */ DbPage *pDbPage /* Page containing pPayload */ ){ if( eOp ){ /* Copy data from buffer to page (a write operation) */ int rc = sqlite3PagerWrite(pDbPage); if( rc!=SQLITE_OK ){ return rc; } memcpy(pPayload, pBuf, nByte); }else{ /* Copy data from page to buffer (a read operation) */ memcpy(pBuf, pPayload, nByte); } return SQLITE_OK; }
C
Chrome
0
CVE-2017-5009
https://www.cvedetails.com/cve/CVE-2017-5009/
CWE-119
https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
DevTools: send proper resource type in Network.RequestWillBeSent This patch plumbs resoure type into the DispatchWillSendRequest instrumenation. This allows us to report accurate type in Network.RequestWillBeSent event, instead of "Other", that we report today. BUG=765501 R=dgozman Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c Reviewed-on: https://chromium-review.googlesource.com/667504 Reviewed-by: Pavel Feldman <pfeldman@chromium.org> Reviewed-by: Dmitry Gozman <dgozman@chromium.org> Commit-Queue: Andrey Lushnikov <lushnikov@chromium.org> Cr-Commit-Position: refs/heads/master@{#507936}
void InspectorTraceEvents::Will(const probe::ExecuteScript&) {}
void InspectorTraceEvents::Will(const probe::ExecuteScript&) {}
C
Chrome
0
CVE-2011-1800
https://www.cvedetails.com/cve/CVE-2011-1800/
CWE-189
https://github.com/chromium/chromium/commit/1777aa6484af15014b8691082a8c3075418786f5
1777aa6484af15014b8691082a8c3075418786f5
[Qt][WK2] Allow transparent WebViews https://bugs.webkit.org/show_bug.cgi?id=80608 Reviewed by Tor Arne Vestbø. Added support for transparentBackground in QQuickWebViewExperimental. This uses the existing drawsTransparentBackground property in WebKit2. Also, changed LayerTreeHostQt to set the contentsOpaque flag when the root layer changes, otherwise the change doesn't take effect. A new API test was added. * UIProcess/API/qt/qquickwebview.cpp: (QQuickWebViewPrivate::setTransparentBackground): (QQuickWebViewPrivate::transparentBackground): (QQuickWebViewExperimental::transparentBackground): (QQuickWebViewExperimental::setTransparentBackground): * UIProcess/API/qt/qquickwebview_p.h: * UIProcess/API/qt/qquickwebview_p_p.h: (QQuickWebViewPrivate): * UIProcess/API/qt/tests/qquickwebview/tst_qquickwebview.cpp: (tst_QQuickWebView): (tst_QQuickWebView::transparentWebViews): * WebProcess/WebPage/qt/LayerTreeHostQt.cpp: (WebKit::LayerTreeHostQt::LayerTreeHostQt): (WebKit::LayerTreeHostQt::setRootCompositingLayer): git-svn-id: svn://svn.chromium.org/blink/trunk@110254 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void QQuickWebViewLegacyPrivate::updateViewportSize() { Q_Q(QQuickWebView); QSize viewportSize = q->boundingRect().size().toSize(); pageView->setContentsSize(viewportSize); webPageProxy->drawingArea()->setVisibleContentsRectForScaling(IntRect(IntPoint(), viewportSize), 1); }
void QQuickWebViewLegacyPrivate::updateViewportSize() { Q_Q(QQuickWebView); QSize viewportSize = q->boundingRect().size().toSize(); pageView->setContentsSize(viewportSize); webPageProxy->drawingArea()->setVisibleContentsRectForScaling(IntRect(IntPoint(), viewportSize), 1); }
C
Chrome
0
CVE-2016-3835
https://www.cvedetails.com/cve/CVE-2016-3835/
CWE-200
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
7558d03e6498e970b761aa44fff6b2c659202d95
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers Heap pointers do not point to user virtual addresses in case of secure session. Set them to NULL and add checks to avoid accesing them Bug: 28815329 Bug: 28920116 Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
bool venc_dev::handle_extradata(void *buffer, int index) { OMX_BUFFERHEADERTYPE *p_bufhdr = (OMX_BUFFERHEADERTYPE *) buffer; OMX_OTHER_EXTRADATATYPE *p_extra = NULL; if (!extradata_info.uaddr) { DEBUG_PRINT_ERROR("Extradata buffers not allocated"); return false; } p_extra = (OMX_OTHER_EXTRADATATYPE *)ALIGN(p_bufhdr->pBuffer + p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4); if (extradata_info.buffer_size > p_bufhdr->nAllocLen - ALIGN(p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4)) { DEBUG_PRINT_ERROR("Insufficient buffer size for extradata"); p_extra = NULL; return false; } else if (sizeof(msm_vidc_extradata_header) != sizeof(OMX_OTHER_EXTRADATATYPE)) { /* A lot of the code below assumes this condition, so error out if it's not met */ DEBUG_PRINT_ERROR("Extradata ABI mismatch"); return false; } struct msm_vidc_extradata_header *p_extradata = NULL; do { p_extradata = (struct msm_vidc_extradata_header *) (p_extradata ? ((char *)p_extradata) + p_extradata->size : extradata_info.uaddr + index * extradata_info.buffer_size); switch (p_extradata->type) { case MSM_VIDC_EXTRADATA_METADATA_MBI: { OMX_U32 payloadSize = append_mbi_extradata(&p_extra->data, p_extradata); p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE) + payloadSize, 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = (OMX_EXTRADATATYPE)OMX_ExtraDataVideoEncoderMBInfo; p_extra->nDataSize = payloadSize; break; } case MSM_VIDC_EXTRADATA_METADATA_LTR: { *p_extra->data = *p_extradata->data; p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE) + p_extradata->data_size, 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = (OMX_EXTRADATATYPE) OMX_ExtraDataVideoLTRInfo; p_extra->nDataSize = p_extradata->data_size; break; } case MSM_VIDC_EXTRADATA_NONE: p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE), 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = OMX_ExtraDataNone; p_extra->nDataSize = 0; break; default: /* No idea what this stuff is, just skip over it */ DEBUG_PRINT_HIGH("Found an unrecognised extradata (%x) ignoring it", p_extradata->type); continue; } p_extra = (OMX_OTHER_EXTRADATATYPE *)(((char *)p_extra) + p_extra->nSize); } while (p_extradata->type != MSM_VIDC_EXTRADATA_NONE); /* Just for debugging: Traverse the list of extra datas and spit it out onto log */ p_extra = (OMX_OTHER_EXTRADATATYPE *)ALIGN(p_bufhdr->pBuffer + p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4); while(p_extra->eType != OMX_ExtraDataNone) { DEBUG_PRINT_LOW("[%p/%u] found extradata type %x of size %u (%u) at %p", p_bufhdr->pBuffer, (unsigned int)p_bufhdr->nFilledLen, p_extra->eType, (unsigned int)p_extra->nSize, (unsigned int)p_extra->nDataSize, p_extra); p_extra = (OMX_OTHER_EXTRADATATYPE *) (((OMX_U8 *) p_extra) + p_extra->nSize); } return true; }
bool venc_dev::handle_extradata(void *buffer, int index) { OMX_BUFFERHEADERTYPE *p_bufhdr = (OMX_BUFFERHEADERTYPE *) buffer; OMX_OTHER_EXTRADATATYPE *p_extra = NULL; if (!extradata_info.uaddr) { DEBUG_PRINT_ERROR("Extradata buffers not allocated"); return false; } p_extra = (OMX_OTHER_EXTRADATATYPE *)ALIGN(p_bufhdr->pBuffer + p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4); if (extradata_info.buffer_size > p_bufhdr->nAllocLen - ALIGN(p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4)) { DEBUG_PRINT_ERROR("Insufficient buffer size for extradata"); p_extra = NULL; return false; } else if (sizeof(msm_vidc_extradata_header) != sizeof(OMX_OTHER_EXTRADATATYPE)) { /* A lot of the code below assumes this condition, so error out if it's not met */ DEBUG_PRINT_ERROR("Extradata ABI mismatch"); return false; } struct msm_vidc_extradata_header *p_extradata = NULL; do { p_extradata = (struct msm_vidc_extradata_header *) (p_extradata ? ((char *)p_extradata) + p_extradata->size : extradata_info.uaddr + index * extradata_info.buffer_size); switch (p_extradata->type) { case MSM_VIDC_EXTRADATA_METADATA_MBI: { OMX_U32 payloadSize = append_mbi_extradata(&p_extra->data, p_extradata); p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE) + payloadSize, 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = (OMX_EXTRADATATYPE)OMX_ExtraDataVideoEncoderMBInfo; p_extra->nDataSize = payloadSize; break; } case MSM_VIDC_EXTRADATA_METADATA_LTR: { *p_extra->data = *p_extradata->data; p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE) + p_extradata->data_size, 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = (OMX_EXTRADATATYPE) OMX_ExtraDataVideoLTRInfo; p_extra->nDataSize = p_extradata->data_size; break; } case MSM_VIDC_EXTRADATA_NONE: p_extra->nSize = ALIGN(sizeof(OMX_OTHER_EXTRADATATYPE), 4); p_extra->nVersion.nVersion = OMX_SPEC_VERSION; p_extra->nPortIndex = OMX_DirOutput; p_extra->eType = OMX_ExtraDataNone; p_extra->nDataSize = 0; break; default: /* No idea what this stuff is, just skip over it */ DEBUG_PRINT_HIGH("Found an unrecognised extradata (%x) ignoring it", p_extradata->type); continue; } p_extra = (OMX_OTHER_EXTRADATATYPE *)(((char *)p_extra) + p_extra->nSize); } while (p_extradata->type != MSM_VIDC_EXTRADATA_NONE); /* Just for debugging: Traverse the list of extra datas and spit it out onto log */ p_extra = (OMX_OTHER_EXTRADATATYPE *)ALIGN(p_bufhdr->pBuffer + p_bufhdr->nOffset + p_bufhdr->nFilledLen, 4); while(p_extra->eType != OMX_ExtraDataNone) { DEBUG_PRINT_LOW("[%p/%u] found extradata type %x of size %u (%u) at %p", p_bufhdr->pBuffer, (unsigned int)p_bufhdr->nFilledLen, p_extra->eType, (unsigned int)p_extra->nSize, (unsigned int)p_extra->nDataSize, p_extra); p_extra = (OMX_OTHER_EXTRADATATYPE *) (((OMX_U8 *) p_extra) + p_extra->nSize); } return true; }
C
Android
0
CVE-2016-10517
https://www.cvedetails.com/cve/CVE-2016-10517/
CWE-254
https://github.com/antirez/redis/commit/874804da0c014a7d704b3d285aa500098a931f50
874804da0c014a7d704b3d285aa500098a931f50
Security: Cross Protocol Scripting protection. This is an attempt at mitigating problems due to cross protocol scripting, an attack targeting services using line oriented protocols like Redis that can accept HTTP requests as valid protocol, by discarding the invalid parts and accepting the payloads sent, for example, via a POST request. For this to be effective, when we detect POST and Host: and terminate the connection asynchronously, the networking code was modified in order to never process further input. It was later verified that in a pipelined request containing a POST command, the successive commands are not executed.
void timeCommand(client *c) { struct timeval tv; /* gettimeofday() can only fail if &tv is a bad address so we * don't check for errors. */ gettimeofday(&tv,NULL); addReplyMultiBulkLen(c,2); addReplyBulkLongLong(c,tv.tv_sec); addReplyBulkLongLong(c,tv.tv_usec); }
void timeCommand(client *c) { struct timeval tv; /* gettimeofday() can only fail if &tv is a bad address so we * don't check for errors. */ gettimeofday(&tv,NULL); addReplyMultiBulkLen(c,2); addReplyBulkLongLong(c,tv.tv_sec); addReplyBulkLongLong(c,tv.tv_usec); }
C
redis
0
null
null
null
https://github.com/chromium/chromium/commit/fc790462b4f248712bbc8c3734664dd6b05f80f2
fc790462b4f248712bbc8c3734664dd6b05f80f2
Set the job name for the print job on the Mac. BUG=http://crbug.com/29188 TEST=as in bug Review URL: http://codereview.chromium.org/1997016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47056 0039d316-1c4b-4281-b951-d872f2087c98
void ResourceMessageFilter::OnDestruct() { ChromeThread::DeleteOnIOThread::Destruct(this); }
void ResourceMessageFilter::OnDestruct() { ChromeThread::DeleteOnIOThread::Destruct(this); }
C
Chrome
0
CVE-2013-4483
https://www.cvedetails.com/cve/CVE-2013-4483/
CWE-189
https://github.com/torvalds/linux/commit/6062a8dc0517bce23e3c2f7d2fea5e22411269a3
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
ipc,sem: fine grained locking for semtimedop Introduce finer grained locking for semtimedop, to handle the common case of a program wanting to manipulate one semaphore from an array with multiple semaphores. If the call is a semop manipulating just one semaphore in an array with multiple semaphores, only take the lock for that semaphore itself. If the call needs to manipulate multiple semaphores, or another caller is in a transaction that manipulates multiple semaphores, the sem_array lock is taken, as well as all the locks for the individual semaphores. On a 24 CPU system, performance numbers with the semop-multi test with N threads and N semaphores, look like this: vanilla Davidlohr's Davidlohr's + Davidlohr's + threads patches rwlock patches v3 patches 10 610652 726325 1783589 2142206 20 341570 365699 1520453 1977878 30 288102 307037 1498167 2037995 40 290714 305955 1612665 2256484 50 288620 312890 1733453 2650292 60 289987 306043 1649360 2388008 70 291298 306347 1723167 2717486 80 290948 305662 1729545 2763582 90 290996 306680 1736021 2757524 100 292243 306700 1773700 3059159 [davidlohr.bueso@hp.com: do not call sem_lock when bogus sma] [davidlohr.bueso@hp.com: make refcounter atomic] Signed-off-by: Rik van Riel <riel@redhat.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com> Cc: Chegu Vinod <chegu_vinod@hp.com> Cc: Jason Low <jason.low2@hp.com> Reviewed-by: Michel Lespinasse <walken@google.com> Cc: Peter Hurley <peter@hurleysoftware.com> Cc: Stanislav Kinsbursky <skinsbursky@parallels.com> Tested-by: Emmanuel Benisty <benisty.e@gmail.com> Tested-by: Sedat Dilek <sedat.dilek@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); }
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); }
C
linux
0
CVE-2012-5136
https://www.cvedetails.com/cve/CVE-2012-5136/
CWE-20
https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502
401d30ef93030afbf7e81e53a11b68fc36194502
Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document The member is used only in Document, thus no reason to stay in SecurityContext. TEST=none BUG=none R=haraken@chromium.org, abarth, haraken, hayato Review URL: https://codereview.chromium.org/27615003 git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void Document::setWindowAttributeEventListener(const AtomicString& eventType, PassRefPtr<EventListener> listener, DOMWrapperWorld* isolatedWorld) { DOMWindow* domWindow = this->domWindow(); if (!domWindow) return; domWindow->setAttributeEventListener(eventType, listener, isolatedWorld); }
void Document::setWindowAttributeEventListener(const AtomicString& eventType, PassRefPtr<EventListener> listener, DOMWrapperWorld* isolatedWorld) { DOMWindow* domWindow = this->domWindow(); if (!domWindow) return; domWindow->setAttributeEventListener(eventType, listener, isolatedWorld); }
C
Chrome
0
CVE-2015-8816
https://www.cvedetails.com/cve/CVE-2015-8816/
null
https://github.com/torvalds/linux/commit/e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
USB: fix invalid memory access in hub_activate() Commit 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") changed the hub_activate() routine to make part of it run in a workqueue. However, the commit failed to take a reference to the usb_hub structure or to lock the hub interface while doing so. As a result, if a hub is plugged in and quickly unplugged before the work routine can run, the routine will try to access memory that has been deallocated. Or, if the hub is unplugged while the routine is running, the memory may be deallocated while it is in active use. This patch fixes the problem by taking a reference to the usb_hub at the start of hub_activate() and releasing it at the end (when the work is finished), and by locking the hub interface while the work routine is running. It also adds a check at the start of the routine to see if the hub has already been disconnected, in which nothing should be done. Signed-off-by: Alan Stern <stern@rowland.harvard.edu> Reported-by: Alexandru Cornea <alexandru.cornea@intel.com> Tested-by: Alexandru Cornea <alexandru.cornea@intel.com> Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") CC: <stable@vger.kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
static int wait_for_ss_port_enable(struct usb_device *udev, struct usb_hub *hub, int *port1, u16 *portchange, u16 *portstatus) { int status = 0, delay_ms = 0; while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); } return status; }
static int wait_for_ss_port_enable(struct usb_device *udev, struct usb_hub *hub, int *port1, u16 *portchange, u16 *portstatus) { int status = 0, delay_ms = 0; while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); } return status; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
5041f984669fe3a989a84c348eb838c8f7233f6b
AutoFill: Release the cached frame when we receive the frameDestroyed() message from WebKit. BUG=48857 TEST=none Review URL: http://codereview.chromium.org/3173005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
void RenderView::OnReloadFrame() { if (webview() && webview()->focusedFrame()) { webview()->focusedFrame()->reload(false); } }
void RenderView::OnReloadFrame() { if (webview() && webview()->focusedFrame()) { webview()->focusedFrame()->reload(false); } }
C
Chrome
0
CVE-2011-3104
https://www.cvedetails.com/cve/CVE-2011-3104/
CWE-119
https://github.com/chromium/chromium/commit/6b5f83842b5edb5d4bd6684b196b3630c6769731
6b5f83842b5edb5d4bd6684b196b3630c6769731
[i18n-fixlet] Make strings branding specific in extension code. IDS_EXTENSIONS_UNINSTALL IDS_EXTENSIONS_INCOGNITO_WARNING IDS_EXTENSION_INSTALLED_HEADING IDS_EXTENSION_ALERT_ITEM_EXTERNAL And fix a $1 $1 bug. IDS_EXTENSION_INLINE_INSTALL_PROMPT_TITLE BUG=NONE TEST=NONE Review URL: http://codereview.chromium.org/9107061 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@118018 0039d316-1c4b-4281-b951-d872f2087c98
void ExtensionGlobalError::BubbleViewAcceptButtonPressed() { if (!accept_callback_.is_null()) { accept_callback_.Run(*this, current_browser_); } }
void ExtensionGlobalError::BubbleViewAcceptButtonPressed() { if (!accept_callback_.is_null()) { accept_callback_.Run(*this, current_browser_); } }
C
Chrome
0
CVE-2016-5338
https://www.cvedetails.com/cve/CVE-2016-5338/
CWE-20
https://git.qemu.org/?p=qemu.git;a=commit;h=ff589551c8e8e9e95e211b9d8daafb4ed39f1aec
ff589551c8e8e9e95e211b9d8daafb4ed39f1aec
null
uint64_t esp_reg_read(ESPState *s, uint32_t saddr) { uint32_t old_val; trace_esp_mem_readb(saddr, s->rregs[saddr]); switch (saddr) { case ESP_FIFO: if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { /* Data out. */ qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n"); s->rregs[ESP_FIFO] = 0; esp_raise_irq(s); } else if (s->ti_rptr < s->ti_wptr) { s->ti_size--; s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++]; esp_raise_irq(s); } if (s->ti_rptr == s->ti_wptr) { s->ti_rptr = 0; s->ti_wptr = 0; } s->ti_wptr = 0; } break; case ESP_RINTR: /* Clear sequence step, interrupt register and all status bits except TC */ old_val = s->rregs[ESP_RINTR]; s->rregs[ESP_RINTR] = 0; s->rregs[ESP_RSTAT] &= ~STAT_TC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_lower_irq(s); return old_val; case ESP_TCHI: /* Return the unique id if the value has never been written */ if (!s->tchi_written) { return s->chip_id; } default: break; }
uint64_t esp_reg_read(ESPState *s, uint32_t saddr) { uint32_t old_val; trace_esp_mem_readb(saddr, s->rregs[saddr]); switch (saddr) { case ESP_FIFO: if (s->ti_size > 0) { s->ti_size--; if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { /* Data out. */ qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n"); s->rregs[ESP_FIFO] = 0; } else { s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++]; } esp_raise_irq(s); } if (s->ti_size == 0) { s->ti_rptr = 0; s->ti_wptr = 0; } s->ti_wptr = 0; } break; case ESP_RINTR: /* Clear sequence step, interrupt register and all status bits except TC */ old_val = s->rregs[ESP_RINTR]; s->rregs[ESP_RINTR] = 0; s->rregs[ESP_RSTAT] &= ~STAT_TC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_lower_irq(s); return old_val; case ESP_TCHI: /* Return the unique id if the value has never been written */ if (!s->tchi_written) { return s->chip_id; } default: break; }
C
qemu
1
CVE-2018-18344
https://www.cvedetails.com/cve/CVE-2018-18344/
CWE-20
https://github.com/chromium/chromium/commit/c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
[DevTools] Do not allow Page.setDownloadBehavior for extensions Bug: 866426 Change-Id: I71b672978e1a8ec779ede49da16b21198567d3a4 Reviewed-on: https://chromium-review.googlesource.com/c/1270007 Commit-Queue: Dmitry Gozman <dgozman@chromium.org> Reviewed-by: Devlin <rdevlin.cronin@chromium.org> Cr-Commit-Position: refs/heads/master@{#598004}
void DispatchToAgents(int frame_tree_node_id, void (Handler::*method)(MethodArgs...), Args&&... args) { FrameTreeNode* ftn = FrameTreeNode::GloballyFindByID(frame_tree_node_id); if (ftn) DispatchToAgents(ftn, method, std::forward<Args>(args)...); }
void DispatchToAgents(int frame_tree_node_id, void (Handler::*method)(MethodArgs...), Args&&... args) { FrameTreeNode* ftn = FrameTreeNode::GloballyFindByID(frame_tree_node_id); if (ftn) DispatchToAgents(ftn, method, std::forward<Args>(args)...); }
C
Chrome
0
CVE-2012-5136
https://www.cvedetails.com/cve/CVE-2012-5136/
CWE-20
https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502
401d30ef93030afbf7e81e53a11b68fc36194502
Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document The member is used only in Document, thus no reason to stay in SecurityContext. TEST=none BUG=none R=haraken@chromium.org, abarth, haraken, hayato Review URL: https://codereview.chromium.org/27615003 git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538
HTMLCanvasElement* Document::getCSSCanvasElement(const String& name) { RefPtr<HTMLCanvasElement>& element = m_cssCanvasElements.add(name, 0).iterator->value; if (!element) { element = HTMLCanvasElement::create(*this); element->setAccelerationDisabled(true); } return element.get(); }
HTMLCanvasElement* Document::getCSSCanvasElement(const String& name) { RefPtr<HTMLCanvasElement>& element = m_cssCanvasElements.add(name, 0).iterator->value; if (!element) { element = HTMLCanvasElement::create(*this); element->setAccelerationDisabled(true); } return element.get(); }
C
Chrome
0
CVE-2015-7804
https://www.cvedetails.com/cve/CVE-2015-7804/
CWE-189
https://git.php.net/?p=php-src.git;a=commit;h=1ddf72180a52d247db88ea42a3e35f824a8fbda1
1ddf72180a52d247db88ea42a3e35f824a8fbda2
null
int phar_seek_efp(phar_entry_info *entry, off_t offset, int whence, off_t position, int follow_links TSRMLS_DC) /* {{{ */ { php_stream *fp = phar_get_efp(entry, follow_links TSRMLS_CC); off_t temp, eoffset; if (!fp) { return -1; } if (follow_links) { phar_entry_info *t; t = phar_get_link_source(entry TSRMLS_CC); if (t) { entry = t; } } if (entry->is_dir) { return 0; } eoffset = phar_get_fp_offset(entry TSRMLS_CC); switch (whence) { case SEEK_END: temp = eoffset + entry->uncompressed_filesize + offset; break; case SEEK_CUR: temp = eoffset + position + offset; break; case SEEK_SET: temp = eoffset + offset; break; default: temp = 0; } if (temp > eoffset + (off_t) entry->uncompressed_filesize) { return -1; } if (temp < eoffset) { return -1; } return php_stream_seek(fp, temp, SEEK_SET); } /* }}} */
int phar_seek_efp(phar_entry_info *entry, off_t offset, int whence, off_t position, int follow_links TSRMLS_DC) /* {{{ */ { php_stream *fp = phar_get_efp(entry, follow_links TSRMLS_CC); off_t temp, eoffset; if (!fp) { return -1; } if (follow_links) { phar_entry_info *t; t = phar_get_link_source(entry TSRMLS_CC); if (t) { entry = t; } } if (entry->is_dir) { return 0; } eoffset = phar_get_fp_offset(entry TSRMLS_CC); switch (whence) { case SEEK_END: temp = eoffset + entry->uncompressed_filesize + offset; break; case SEEK_CUR: temp = eoffset + position + offset; break; case SEEK_SET: temp = eoffset + offset; break; default: temp = 0; } if (temp > eoffset + (off_t) entry->uncompressed_filesize) { return -1; } if (temp < eoffset) { return -1; } return php_stream_seek(fp, temp, SEEK_SET); } /* }}} */
C
php
0
CVE-2018-17942
https://www.cvedetails.com/cve/CVE-2018-17942/
CWE-119
https://github.com/coreutils/gnulib/commit/278b4175c9d7dd47c1a3071554aac02add3b3c35
278b4175c9d7dd47c1a3071554aac02add3b3c35
vasnprintf: Fix heap memory overrun bug. Reported by Ben Pfaff <blp@cs.stanford.edu> in <https://lists.gnu.org/archive/html/bug-gnulib/2018-09/msg00107.html>. * lib/vasnprintf.c (convert_to_decimal): Allocate one more byte of memory. * tests/test-vasnprintf.c (test_function): Add another test.
scale10_round_decimal_long_double (long double x, int n) { int e IF_LINT(= 0); mpn_t m; void *memory = decode_long_double (x, &e, &m); return scale10_round_decimal_decoded (e, m, memory, n); }
scale10_round_decimal_long_double (long double x, int n) { int e IF_LINT(= 0); mpn_t m; void *memory = decode_long_double (x, &e, &m); return scale10_round_decimal_decoded (e, m, memory, n); }
C
gnulib
0
CVE-2015-1214
https://www.cvedetails.com/cve/CVE-2015-1214/
CWE-190
https://github.com/chromium/chromium/commit/a81c185f34b34ef8410239506825b185b332c00b
a81c185f34b34ef8410239506825b185b332c00b
Add data usage tracking for chrome services Add data usage tracking for captive portal, web resource and signin services BUG=655749 Review-Url: https://codereview.chromium.org/2643013004 Cr-Commit-Position: refs/heads/master@{#445810}
void GaiaCookieManagerService::OnMergeSessionSuccess(const std::string& data) { VLOG(1) << "MergeSession successful account=" << requests_.front().account_id(); DCHECK(requests_.front().request_type() == GaiaCookieRequestType::ADD_ACCOUNT); list_accounts_stale_ = true; const std::string account_id = requests_.front().account_id(); HandleNextRequest(); SignalComplete(account_id, GoogleServiceAuthError::AuthErrorNone()); fetcher_backoff_.InformOfRequest(true); uber_token_ = std::string(); }
void GaiaCookieManagerService::OnMergeSessionSuccess(const std::string& data) { VLOG(1) << "MergeSession successful account=" << requests_.front().account_id(); DCHECK(requests_.front().request_type() == GaiaCookieRequestType::ADD_ACCOUNT); list_accounts_stale_ = true; const std::string account_id = requests_.front().account_id(); HandleNextRequest(); SignalComplete(account_id, GoogleServiceAuthError::AuthErrorNone()); fetcher_backoff_.InformOfRequest(true); uber_token_ = std::string(); }
C
Chrome
0
CVE-2012-3520
https://www.cvedetails.com/cve/CVE-2012-3520/
CWE-287
https://github.com/torvalds/linux/commit/e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
e0e3cea46d31d23dc40df0a49a7a2c04fe8edfea
af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Petr Matousek <pmatouse@redhat.com> Cc: Florian Weimer <fweimer@redhat.com> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Signed-off-by: David S. Miller <davem@davemloft.net>
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *s; struct nl_seq_iter *iter; int i, j; ++*pos; if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); iter = seq->private; s = v; do { s = sk_next(s); } while (s && sock_net(s) != seq_file_net(seq)); if (s) return s; i = iter->link; j = iter->hash_idx + 1; do { struct nl_pid_hash *hash = &nl_table[i].hash; for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); while (s && sock_net(s) != seq_file_net(seq)) s = sk_next(s); if (s) { iter->link = i; iter->hash_idx = j; return s; } } j = 0; } while (++i < MAX_LINKS); return NULL; }
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *s; struct nl_seq_iter *iter; int i, j; ++*pos; if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); iter = seq->private; s = v; do { s = sk_next(s); } while (s && sock_net(s) != seq_file_net(seq)); if (s) return s; i = iter->link; j = iter->hash_idx + 1; do { struct nl_pid_hash *hash = &nl_table[i].hash; for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); while (s && sock_net(s) != seq_file_net(seq)) s = sk_next(s); if (s) { iter->link = i; iter->hash_idx = j; return s; } } j = 0; } while (++i < MAX_LINKS); return NULL; }
C
linux
0
CVE-2014-9710
https://www.cvedetails.com/cve/CVE-2014-9710/
CWE-362
https://github.com/torvalds/linux/commit/5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
Btrfs: make xattr replace operations atomic Replacing a xattr consists of doing a lookup for its existing value, delete the current value from the respective leaf, release the search path and then finally insert the new value. This leaves a time window where readers (getxattr, listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs, so this has security implications. This change also fixes 2 other existing issues which were: *) Deleting the old xattr value without verifying first if the new xattr will fit in the existing leaf item (in case multiple xattrs are packed in the same item due to name hash collision); *) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't exist but we have have an existing item that packs muliple xattrs with the same name hash as the input xattr. In this case we should return ENOSPC. A test case for xfstests follows soon. Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace implementation. Reported-by: Alexandre Oliva <oliva@gnu.org> Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Chris Mason <clm@fb.com>
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; u32 nritems; unsigned int data_end; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; int slot; struct btrfs_map_token token; if (path->slots[0] == 0) { btrfs_cpu_key_to_disk(&disk_key, cpu_key); fixup_low_keys(root, path, &disk_key, 1); } btrfs_unlock_up_safe(path, 1); btrfs_init_map_token(&token); leaf = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); btrfs_crit(root->fs_info, "not enough freespace need %u have %d", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); if (old_data < data_end) { btrfs_print_leaf(root, leaf); btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d", slot, old_data, data_end); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr( i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - total_data, &token); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - total_data, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; } /* setup the item for the new data */ for (i = 0; i < nr; i++) { btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(slot + i); btrfs_set_token_item_offset(leaf, item, data_end - data_size[i], &token); data_end -= data_size[i]; btrfs_set_token_item_size(leaf, item, data_size[i], &token); } btrfs_set_header_nritems(leaf, nritems + nr); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } }
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 *data_size, u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; u32 nritems; unsigned int data_end; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; int slot; struct btrfs_map_token token; if (path->slots[0] == 0) { btrfs_cpu_key_to_disk(&disk_key, cpu_key); fixup_low_keys(root, path, &disk_key, 1); } btrfs_unlock_up_safe(path, 1); btrfs_init_map_token(&token); leaf = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); data_end = leaf_data_end(root, leaf); if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); btrfs_crit(root->fs_info, "not enough freespace need %u have %d", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); if (old_data < data_end) { btrfs_print_leaf(root, leaf); btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d", slot, old_data, data_end); BUG_ON(1); } /* * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr( i); ioff = btrfs_token_item_offset(leaf, item, &token); btrfs_set_token_item_offset(leaf, item, ioff - total_data, &token); } /* shift the items */ memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), btrfs_item_nr_offset(slot), (nritems - slot) * sizeof(struct btrfs_item)); /* shift the data */ memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end - total_data, btrfs_leaf_data(leaf) + data_end, old_data - data_end); data_end = old_data; } /* setup the item for the new data */ for (i = 0; i < nr; i++) { btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); btrfs_set_item_key(leaf, &disk_key, slot + i); item = btrfs_item_nr(slot + i); btrfs_set_token_item_offset(leaf, item, data_end - data_size[i], &token); data_end -= data_size[i]; btrfs_set_token_item_size(leaf, item, data_size[i], &token); } btrfs_set_header_nritems(leaf, nritems + nr); btrfs_mark_buffer_dirty(leaf); if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } }
C
linux
0
CVE-2011-2861
https://www.cvedetails.com/cve/CVE-2011-2861/
CWE-20
https://github.com/chromium/chromium/commit/8262245d384be025f13e2a5b3a03b7e5c98374ce
8262245d384be025f13e2a5b3a03b7e5c98374ce
DevTools: move DevToolsAgent/Client into content. BUG=84078 TEST= Review URL: http://codereview.chromium.org/7461019 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98
void BrowserRenderProcessHost::ReceivedBadMessage() { if (run_renderer_in_process()) { CHECK(false); } NOTREACHED(); base::KillProcess(GetHandle(), content::RESULT_CODE_KILLED_BAD_MESSAGE, false); }
void BrowserRenderProcessHost::ReceivedBadMessage() { if (run_renderer_in_process()) { CHECK(false); } NOTREACHED(); base::KillProcess(GetHandle(), content::RESULT_CODE_KILLED_BAD_MESSAGE, false); }
C
Chrome
0
CVE-2011-4621
https://www.cvedetails.com/cve/CVE-2011-4621/
null
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the waking a sleeper before it sucessfully deschedules case by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <efault@gmx.de> Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com> Tested-by: Yong Zhang <yong.zhang0@gmail.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: stable@kernel.org LKML-Reference: <1291802742.1417.9.camel@marge.simson.net> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static unsigned long power_of(int cpu) { return cpu_rq(cpu)->cpu_power; }
static unsigned long power_of(int cpu) { return cpu_rq(cpu)->cpu_power; }
C
linux
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Michael Cree <mcree@orcon.net.nz> Cc: Will Deacon <will.deacon@arm.com> Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com> Cc: Anton Blanchard <anton@samba.org> Cc: Eric B Munson <emunson@mgebm.net> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: David S. Miller <davem@davemloft.net> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jason Wessel <jason.wessel@windriver.com> Cc: Don Zickus <dzickus@redhat.com> Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs) { if ((code & 0xf000) == 0xf000) return id_fnmx(fregs, regs, code); else return id_sys(fregs, regs, code); }
static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs) { if ((code & 0xf000) == 0xf000) return id_fnmx(fregs, regs, code); else return id_sys(fregs, regs, code); }
C
linux
0
CVE-2011-1477
https://www.cvedetails.com/cve/CVE-2011-1477/
CWE-119
https://github.com/torvalds/linux/commit/4d00135a680727f6c3be78f8befaac009030e4df
4d00135a680727f6c3be78f8befaac009030e4df
sound/oss/opl3: validate voice and channel indexes User-controllable indexes for voice and channel values may cause reading and writing beyond the bounds of their respective arrays, leading to potentially exploitable memory corruption. Validate these indexes. Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com> Cc: stable@kernel.org Signed-off-by: Takashi Iwai <tiwai@suse.de>
static int opl3_ioctl(int dev, unsigned int cmd, void __user * arg) { struct sbi_instrument ins; switch (cmd) { case SNDCTL_FM_LOAD_INSTR: printk(KERN_WARNING "Warning: Obsolete ioctl(SNDCTL_FM_LOAD_INSTR) used. Fix the program.\n"); if (copy_from_user(&ins, arg, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) { printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel); return -EINVAL; } return store_instr(ins.channel, &ins); case SNDCTL_SYNTH_INFO: devc->fm_info.nr_voices = (devc->nr_voice == 12) ? 6 : devc->nr_voice; if (copy_to_user(arg, &devc->fm_info, sizeof(devc->fm_info))) return -EFAULT; return 0; case SNDCTL_SYNTH_MEMAVL: return 0x7fffffff; case SNDCTL_FM_4OP_ENABLE: if (devc->model == 2) enter_4op_mode(); return 0; default: return -EINVAL; } }
static int opl3_ioctl(int dev, unsigned int cmd, void __user * arg) { struct sbi_instrument ins; switch (cmd) { case SNDCTL_FM_LOAD_INSTR: printk(KERN_WARNING "Warning: Obsolete ioctl(SNDCTL_FM_LOAD_INSTR) used. Fix the program.\n"); if (copy_from_user(&ins, arg, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) { printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel); return -EINVAL; } return store_instr(ins.channel, &ins); case SNDCTL_SYNTH_INFO: devc->fm_info.nr_voices = (devc->nr_voice == 12) ? 6 : devc->nr_voice; if (copy_to_user(arg, &devc->fm_info, sizeof(devc->fm_info))) return -EFAULT; return 0; case SNDCTL_SYNTH_MEMAVL: return 0x7fffffff; case SNDCTL_FM_4OP_ENABLE: if (devc->model == 2) enter_4op_mode(); return 0; default: return -EINVAL; } }
C
linux
0
CVE-2013-6626
https://www.cvedetails.com/cve/CVE-2013-6626/
null
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
90fb08ed0146c9beacfd4dde98a20fc45419fff3
Cancel JavaScript dialogs when an interstitial appears. BUG=295695 TEST=See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/24360011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
DestructionObserver(WebContentsImpl* owner, WebContents* watched_contents) : WebContentsObserver(watched_contents), owner_(owner) { }
DestructionObserver(WebContentsImpl* owner, WebContents* watched_contents) : WebContentsObserver(watched_contents), owner_(owner) { }
C
Chrome
0
CVE-2018-16427
https://www.cvedetails.com/cve/CVE-2018-16427/
CWE-125
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
static int piv_write_binary(sc_card_t *card, unsigned int idx, const u8 *buf, size_t count, unsigned long flags) { piv_private_data_t * priv = PIV_DATA(card); int r; int enumtag; LOG_FUNC_CALLED(card->ctx); if (priv->selected_obj < 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); enumtag = piv_objects[priv->selected_obj].enumtag; if (priv->rwb_state == 1) /* trying to write at end */ LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); if (priv->rwb_state == -1) { /* if cached, remove old entry */ if (priv->obj_cache[enumtag].flags & PIV_OBJ_CACHE_VALID) { priv->obj_cache[enumtag].flags = 0; if (priv->obj_cache[enumtag].obj_data) { free(priv->obj_cache[enumtag].obj_data); priv->obj_cache[enumtag].obj_data = NULL; priv->obj_cache[enumtag].obj_len = 0; } if (priv->obj_cache[enumtag].internal_obj_data) { free(priv->obj_cache[enumtag].internal_obj_data); priv->obj_cache[enumtag].internal_obj_data = NULL; priv->obj_cache[enumtag].internal_obj_len = 0; } } if (idx != 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_NO_CARD_SUPPORT); priv->w_buf_len = flags>>8; if (priv->w_buf_len == 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); priv->w_buf = malloc(priv->w_buf_len); priv-> rwb_state = 0; } /* on each pass make sure we have w_buf */ if (priv->w_buf == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); if (idx + count > priv->w_buf_len) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); memcpy(priv->w_buf + idx, buf, count); /* copy one chunk */ /* if this was not the last chunk, return to get rest */ if (idx + count < priv->w_buf_len) LOG_FUNC_RETURN(card->ctx, count); priv-> rwb_state = 1; /* at end of object */ switch (flags & 0x0f) { case 1: r = piv_write_certificate(card, priv->w_buf, priv->w_buf_len, flags & 0x10); break; case 2: /* pubkey to be added to cache, it should have 0x53 and 0x99 tags. */ /* TODO: -DEE this is not fully implemented and not used */ r = priv->w_buf_len; break; default: r = piv_put_data(card, enumtag, priv->w_buf, priv->w_buf_len); break; } /* if it worked, will cache it */ if (r >= 0 && priv->w_buf) { priv->obj_cache[enumtag].flags |= PIV_OBJ_CACHE_VALID; priv->obj_cache[enumtag].obj_data = priv->w_buf; priv->obj_cache[enumtag].obj_len = priv->w_buf_len; } else { if (priv->w_buf) free(priv->w_buf); } priv->w_buf = NULL; priv->w_buf_len = 0; LOG_FUNC_RETURN(card->ctx, (r < 0)? r : (int)count); }
static int piv_write_binary(sc_card_t *card, unsigned int idx, const u8 *buf, size_t count, unsigned long flags) { piv_private_data_t * priv = PIV_DATA(card); int r; int enumtag; LOG_FUNC_CALLED(card->ctx); if (priv->selected_obj < 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); enumtag = piv_objects[priv->selected_obj].enumtag; if (priv->rwb_state == 1) /* trying to write at end */ LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); if (priv->rwb_state == -1) { /* if cached, remove old entry */ if (priv->obj_cache[enumtag].flags & PIV_OBJ_CACHE_VALID) { priv->obj_cache[enumtag].flags = 0; if (priv->obj_cache[enumtag].obj_data) { free(priv->obj_cache[enumtag].obj_data); priv->obj_cache[enumtag].obj_data = NULL; priv->obj_cache[enumtag].obj_len = 0; } if (priv->obj_cache[enumtag].internal_obj_data) { free(priv->obj_cache[enumtag].internal_obj_data); priv->obj_cache[enumtag].internal_obj_data = NULL; priv->obj_cache[enumtag].internal_obj_len = 0; } } if (idx != 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_NO_CARD_SUPPORT); priv->w_buf_len = flags>>8; if (priv->w_buf_len == 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL); priv->w_buf = malloc(priv->w_buf_len); priv-> rwb_state = 0; } /* on each pass make sure we have w_buf */ if (priv->w_buf == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); if (idx + count > priv->w_buf_len) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OBJECT_NOT_VALID); memcpy(priv->w_buf + idx, buf, count); /* copy one chunk */ /* if this was not the last chunk, return to get rest */ if (idx + count < priv->w_buf_len) LOG_FUNC_RETURN(card->ctx, count); priv-> rwb_state = 1; /* at end of object */ switch (flags & 0x0f) { case 1: r = piv_write_certificate(card, priv->w_buf, priv->w_buf_len, flags & 0x10); break; case 2: /* pubkey to be added to cache, it should have 0x53 and 0x99 tags. */ /* TODO: -DEE this is not fully implemented and not used */ r = priv->w_buf_len; break; default: r = piv_put_data(card, enumtag, priv->w_buf, priv->w_buf_len); break; } /* if it worked, will cache it */ if (r >= 0 && priv->w_buf) { priv->obj_cache[enumtag].flags |= PIV_OBJ_CACHE_VALID; priv->obj_cache[enumtag].obj_data = priv->w_buf; priv->obj_cache[enumtag].obj_len = priv->w_buf_len; } else { if (priv->w_buf) free(priv->w_buf); } priv->w_buf = NULL; priv->w_buf_len = 0; LOG_FUNC_RETURN(card->ctx, (r < 0)? r : (int)count); }
C
OpenSC
0
CVE-2015-6765
https://www.cvedetails.com/cve/CVE-2015-6765/
null
https://github.com/chromium/chromium/commit/e5c298b780737c53fa9aae44d6fef522931d88b0
e5c298b780737c53fa9aae44d6fef522931d88b0
AppCache: fix a browser crashing bug that can happen during updates. BUG=558589 Review URL: https://codereview.chromium.org/1463463003 Cr-Commit-Position: refs/heads/master@{#360967}
void SendErrorNotifications(const AppCacheErrorDetails& details) { DCHECK(!details.message.empty()); for (NotifyHostMap::iterator it = hosts_to_notify.begin(); it != hosts_to_notify.end(); ++it) { AppCacheFrontend* frontend = it->first; frontend->OnErrorEventRaised(it->second, details); } }
void SendErrorNotifications(const AppCacheErrorDetails& details) { DCHECK(!details.message.empty()); for (NotifyHostMap::iterator it = hosts_to_notify.begin(); it != hosts_to_notify.end(); ++it) { AppCacheFrontend* frontend = it->first; frontend->OnErrorEventRaised(it->second, details); } }
C
Chrome
0
CVE-2017-11472
https://www.cvedetails.com/cve/CVE-2017-11472/
CWE-755
https://github.com/acpica/acpica/commit/a23325b2e583556eae88ed3f764e457786bf4df6
a23325b2e583556eae88ed3f764e457786bf4df6
Namespace: fix operand cache leak I found some ACPI operand cache leaks in ACPI early abort cases. Boot log of ACPI operand cache leak is as follows: >[ 0.174332] ACPI: Added _OSI(Module Device) >[ 0.175504] ACPI: Added _OSI(Processor Device) >[ 0.176010] ACPI: Added _OSI(3.0 _SCP Extensions) >[ 0.177032] ACPI: Added _OSI(Processor Aggregator Device) >[ 0.178284] ACPI: SCI (IRQ16705) allocation failed >[ 0.179352] ACPI Exception: AE_NOT_ACQUIRED, Unable to install System Control Interrupt handler (20160930/evevent-131) >[ 0.180008] ACPI: Unable to start the ACPI Interpreter >[ 0.181125] ACPI Error: Could not remove SCI handler (20160930/evmisc-281) >[ 0.184068] kmem_cache_destroy Acpi-Operand: Slab cache still has objects >[ 0.185358] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.10.0-rc3 #2 >[ 0.186820] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 >[ 0.188000] Call Trace: >[ 0.188000] ? dump_stack+0x5c/0x7d >[ 0.188000] ? kmem_cache_destroy+0x224/0x230 >[ 0.188000] ? acpi_sleep_proc_init+0x22/0x22 >[ 0.188000] ? acpi_os_delete_cache+0xa/0xd >[ 0.188000] ? acpi_ut_delete_caches+0x3f/0x7b >[ 0.188000] ? acpi_terminate+0x5/0xf >[ 0.188000] ? acpi_init+0x288/0x32e >[ 0.188000] ? __class_create+0x4c/0x80 >[ 0.188000] ? video_setup+0x7a/0x7a >[ 0.188000] ? do_one_initcall+0x4e/0x1b0 >[ 0.188000] ? kernel_init_freeable+0x194/0x21a >[ 0.188000] ? rest_init+0x80/0x80 >[ 0.188000] ? kernel_init+0xa/0x100 >[ 0.188000] ? ret_from_fork+0x25/0x30 When early abort is occurred due to invalid ACPI information, Linux kernel terminates ACPI by calling AcpiTerminate() function. The function calls AcpiNsTerminate() function to delete namespace data and ACPI operand cache (AcpiGbl_ModuleCodeList). But the deletion code in AcpiNsTerminate() function is wrapped in ACPI_EXEC_APP definition, therefore the code is only executed when the definition exists. If the define doesn't exist, ACPI operand cache (AcpiGbl_ModuleCodeList) is leaked, and stack dump is shown in kernel log. This causes a security threat because the old kernel (<= 4.9) shows memory locations of kernel functions in stack dump, therefore kernel ASLR can be neutralized. To fix ACPI operand leak for enhancing security, I made a patch which removes the ACPI_EXEC_APP define in AcpiNsTerminate() function for executing the deletion code unconditionally. Signed-off-by: Seunghun Han <kkamagui@gmail.com> Signed-off-by: Lv Zheng <lv.zheng@intel.com>
AcpiNsGetInternalNameLength ( ACPI_NAMESTRING_INFO *Info) { const char *NextExternalChar; UINT32 i; ACPI_FUNCTION_ENTRY (); NextExternalChar = Info->ExternalName; Info->NumCarats = 0; Info->NumSegments = 0; Info->FullyQualified = FALSE; /* * For the internal name, the required length is 4 bytes per segment, * plus 1 each for RootPrefix, MultiNamePrefixOp, segment count, * trailing null (which is not really needed, but no there's harm in * putting it there) * * strlen() + 1 covers the first NameSeg, which has no path separator */ if (ACPI_IS_ROOT_PREFIX (*NextExternalChar)) { Info->FullyQualified = TRUE; NextExternalChar++; /* Skip redundant RootPrefix, like \\_SB.PCI0.SBRG.EC0 */ while (ACPI_IS_ROOT_PREFIX (*NextExternalChar)) { NextExternalChar++; } } else { /* Handle Carat prefixes */ while (ACPI_IS_PARENT_PREFIX (*NextExternalChar)) { Info->NumCarats++; NextExternalChar++; } } /* * Determine the number of ACPI name "segments" by counting the number of * path separators within the string. Start with one segment since the * segment count is [(# separators) + 1], and zero separators is ok. */ if (*NextExternalChar) { Info->NumSegments = 1; for (i = 0; NextExternalChar[i]; i++) { if (ACPI_IS_PATH_SEPARATOR (NextExternalChar[i])) { Info->NumSegments++; } } } Info->Length = (ACPI_NAME_SIZE * Info->NumSegments) + 4 + Info->NumCarats; Info->NextExternalChar = NextExternalChar; }
AcpiNsGetInternalNameLength ( ACPI_NAMESTRING_INFO *Info) { const char *NextExternalChar; UINT32 i; ACPI_FUNCTION_ENTRY (); NextExternalChar = Info->ExternalName; Info->NumCarats = 0; Info->NumSegments = 0; Info->FullyQualified = FALSE; /* * For the internal name, the required length is 4 bytes per segment, * plus 1 each for RootPrefix, MultiNamePrefixOp, segment count, * trailing null (which is not really needed, but no there's harm in * putting it there) * * strlen() + 1 covers the first NameSeg, which has no path separator */ if (ACPI_IS_ROOT_PREFIX (*NextExternalChar)) { Info->FullyQualified = TRUE; NextExternalChar++; /* Skip redundant RootPrefix, like \\_SB.PCI0.SBRG.EC0 */ while (ACPI_IS_ROOT_PREFIX (*NextExternalChar)) { NextExternalChar++; } } else { /* Handle Carat prefixes */ while (ACPI_IS_PARENT_PREFIX (*NextExternalChar)) { Info->NumCarats++; NextExternalChar++; } } /* * Determine the number of ACPI name "segments" by counting the number of * path separators within the string. Start with one segment since the * segment count is [(# separators) + 1], and zero separators is ok. */ if (*NextExternalChar) { Info->NumSegments = 1; for (i = 0; NextExternalChar[i]; i++) { if (ACPI_IS_PATH_SEPARATOR (NextExternalChar[i])) { Info->NumSegments++; } } } Info->Length = (ACPI_NAME_SIZE * Info->NumSegments) + 4 + Info->NumCarats; Info->NextExternalChar = NextExternalChar; }
C
linux
0
CVE-2016-3062
https://www.cvedetails.com/cve/CVE-2016-3062/
CWE-119
https://github.com/FFmpeg/FFmpeg/commit/689e59b7ffed34eba6159dcc78e87133862e3746
689e59b7ffed34eba6159dcc78e87133862e3746
mov: reset dref_count on realloc to keep values consistent. This fixes a potential crash. Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned int i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ entries = avio_rb32(pb); av_dlog(c->fc, "keyframe_count = %d\n", entries); if (!entries) { sc->keyframe_absent = 1; return 0; } if (entries >= UINT_MAX / sizeof(int)) return AVERROR_INVALIDDATA; sc->keyframes = av_malloc(entries * sizeof(int)); if (!sc->keyframes) return AVERROR(ENOMEM); sc->keyframe_count = entries; for (i=0; i<entries; i++) { sc->keyframes[i] = avio_rb32(pb); } return 0; }
static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned int i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ entries = avio_rb32(pb); av_dlog(c->fc, "keyframe_count = %d\n", entries); if (!entries) { sc->keyframe_absent = 1; return 0; } if (entries >= UINT_MAX / sizeof(int)) return AVERROR_INVALIDDATA; sc->keyframes = av_malloc(entries * sizeof(int)); if (!sc->keyframes) return AVERROR(ENOMEM); sc->keyframe_count = entries; for (i=0; i<entries; i++) { sc->keyframes[i] = avio_rb32(pb); } return 0; }
C
FFmpeg
0
CVE-2013-2929
https://www.cvedetails.com/cve/CVE-2013-2929/
CWE-264
https://github.com/torvalds/linux/commit/d049f74f2dbe71354d43d393ac3a188947811348
d049f74f2dbe71354d43d393ac3a188947811348
exec/ptrace: fix get_dumpable() incorrect tests The get_dumpable() return value is not boolean. Most users of the function actually want to be testing for non-SUID_DUMP_USER(1) rather than SUID_DUMP_DISABLE(0). The SUID_DUMP_ROOT(2) is also considered a protected state. Almost all places did this correctly, excepting the two places fixed in this patch. Wrong logic: if (dumpable == SUID_DUMP_DISABLE) { /* be protective */ } or if (dumpable == 0) { /* be protective */ } or if (!dumpable) { /* be protective */ } Correct logic: if (dumpable != SUID_DUMP_USER) { /* be protective */ } or if (dumpable != 1) { /* be protective */ } Without this patch, if the system had set the sysctl fs/suid_dumpable=2, a user was able to ptrace attach to processes that had dropped privileges to that user. (This may have been partially mitigated if Yama was enabled.) The macros have been moved into the file that declares get/set_dumpable(), which means things like the ia64 code can see them too. CVE-2013-2929 Reported-by: Vasily Kulikov <segoon@openwall.com> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
bool ptrace_may_access(struct task_struct *task, unsigned int mode) { int err; task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); return !err; }
bool ptrace_may_access(struct task_struct *task, unsigned int mode) { int err; task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); return !err; }
C
linux
0
CVE-2016-0826
https://www.cvedetails.com/cve/CVE-2016-0826/
CWE-264
https://android.googlesource.com/platform/frameworks/av/+/c9ab2b0bb05a7e19fb057e79b36e232809d70122
c9ab2b0bb05a7e19fb057e79b36e232809d70122
Camera: Disallow dumping clients directly Camera service dumps should only be initiated through ICameraService::dump. Bug: 26265403 Change-Id: If3ca4718ed74bf33ad8a416192689203029e2803
status_t Camera2Client::takePicture(int msgType) { ATRACE_CALL(); Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; int takePictureCounter; { SharedParameters::Lock l(mParameters); switch (l.mParameters.state) { case Parameters::DISCONNECTED: case Parameters::STOPPED: case Parameters::WAITING_FOR_PREVIEW_WINDOW: ALOGE("%s: Camera %d: Cannot take picture without preview enabled", __FUNCTION__, mCameraId); return INVALID_OPERATION; case Parameters::PREVIEW: res = commandStopFaceDetectionL(l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Unable to stop face detection for still capture", __FUNCTION__, mCameraId); return res; } l.mParameters.state = Parameters::STILL_CAPTURE; break; case Parameters::RECORD: l.mParameters.state = Parameters::VIDEO_SNAPSHOT; break; case Parameters::STILL_CAPTURE: case Parameters::VIDEO_SNAPSHOT: ALOGE("%s: Camera %d: Already taking a picture", __FUNCTION__, mCameraId); return INVALID_OPERATION; } ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId); res = updateProcessorStream(mJpegProcessor, l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); return res; } takePictureCounter = ++l.mParameters.takePictureCounter; } ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter); syncWithDevice(); res = mCaptureSequencer->startCapture(msgType); if (res != OK) { ALOGE("%s: Camera %d: Unable to start capture: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); } return res; }
status_t Camera2Client::takePicture(int msgType) { ATRACE_CALL(); Mutex::Autolock icl(mBinderSerializationLock); status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; int takePictureCounter; { SharedParameters::Lock l(mParameters); switch (l.mParameters.state) { case Parameters::DISCONNECTED: case Parameters::STOPPED: case Parameters::WAITING_FOR_PREVIEW_WINDOW: ALOGE("%s: Camera %d: Cannot take picture without preview enabled", __FUNCTION__, mCameraId); return INVALID_OPERATION; case Parameters::PREVIEW: res = commandStopFaceDetectionL(l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Unable to stop face detection for still capture", __FUNCTION__, mCameraId); return res; } l.mParameters.state = Parameters::STILL_CAPTURE; break; case Parameters::RECORD: l.mParameters.state = Parameters::VIDEO_SNAPSHOT; break; case Parameters::STILL_CAPTURE: case Parameters::VIDEO_SNAPSHOT: ALOGE("%s: Camera %d: Already taking a picture", __FUNCTION__, mCameraId); return INVALID_OPERATION; } ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId); res = updateProcessorStream(mJpegProcessor, l.mParameters); if (res != OK) { ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); return res; } takePictureCounter = ++l.mParameters.takePictureCounter; } ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter); syncWithDevice(); res = mCaptureSequencer->startCapture(msgType); if (res != OK) { ALOGE("%s: Camera %d: Unable to start capture: %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res); } return res; }
C
Android
0
CVE-2015-8558
https://www.cvedetails.com/cve/CVE-2015-8558/
CWE-20
https://git.qemu.org/?p=qemu.git;a=commit;h=156a2e4dbffa85997636a7a39ef12da6f1b40254
156a2e4dbffa85997636a7a39ef12da6f1b40254
null
static void ehci_advance_state(EHCIState *ehci, int async) { EHCIQueue *q = NULL; int again; do { switch(ehci_get_state(ehci, async)) { case EST_WAITLISTHEAD: again = ehci_state_waitlisthead(ehci, async); break; case EST_FETCHENTRY: again = ehci_state_fetchentry(ehci, async); break; case EST_FETCHQH: q = ehci_state_fetchqh(ehci, async); if (q != NULL) { assert(q->async == async); again = 1; } else { again = 0; } break; case EST_FETCHITD: again = ehci_state_fetchitd(ehci, async); break; case EST_FETCHSITD: again = ehci_state_fetchsitd(ehci, async); break; case EST_ADVANCEQUEUE: assert(q != NULL); again = ehci_state_advqueue(q); break; case EST_FETCHQTD: assert(q != NULL); again = ehci_state_fetchqtd(q); break; case EST_HORIZONTALQH: assert(q != NULL); again = ehci_state_horizqh(q); break; case EST_EXECUTE: assert(q != NULL); again = ehci_state_execute(q); if (async) { ehci->async_stepdown = 0; } break; case EST_EXECUTING: assert(q != NULL); if (async) { ehci->async_stepdown = 0; } again = ehci_state_executing(q); break; case EST_WRITEBACK: assert(q != NULL); again = ehci_state_writeback(q); if (!async) { ehci->periodic_sched_active = PERIODIC_ACTIVE; } break; default: fprintf(stderr, "Bad state!\n"); again = -1; g_assert_not_reached(); break; } if (again < 0) { fprintf(stderr, "processing error - resetting ehci HC\n"); ehci_reset(ehci); again = 0; } } while (again); }
static void ehci_advance_state(EHCIState *ehci, int async) { EHCIQueue *q = NULL; int again; do { switch(ehci_get_state(ehci, async)) { case EST_WAITLISTHEAD: again = ehci_state_waitlisthead(ehci, async); break; case EST_FETCHENTRY: again = ehci_state_fetchentry(ehci, async); break; case EST_FETCHQH: q = ehci_state_fetchqh(ehci, async); if (q != NULL) { assert(q->async == async); again = 1; } else { again = 0; } break; case EST_FETCHITD: again = ehci_state_fetchitd(ehci, async); break; case EST_FETCHSITD: again = ehci_state_fetchsitd(ehci, async); break; case EST_ADVANCEQUEUE: assert(q != NULL); again = ehci_state_advqueue(q); break; case EST_FETCHQTD: assert(q != NULL); again = ehci_state_fetchqtd(q); break; case EST_HORIZONTALQH: assert(q != NULL); again = ehci_state_horizqh(q); break; case EST_EXECUTE: assert(q != NULL); again = ehci_state_execute(q); if (async) { ehci->async_stepdown = 0; } break; case EST_EXECUTING: assert(q != NULL); if (async) { ehci->async_stepdown = 0; } again = ehci_state_executing(q); break; case EST_WRITEBACK: assert(q != NULL); again = ehci_state_writeback(q); if (!async) { ehci->periodic_sched_active = PERIODIC_ACTIVE; } break; default: fprintf(stderr, "Bad state!\n"); again = -1; g_assert_not_reached(); break; } if (again < 0) { fprintf(stderr, "processing error - resetting ehci HC\n"); ehci_reset(ehci); again = 0; } } while (again); }
C
qemu
0
CVE-2018-17206
https://www.cvedetails.com/cve/CVE-2018-17206/
null
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
9237a63c47bd314b807cda0bd2216264e82edbe8
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <blp@ovn.org> Acked-by: Justin Pettit <jpettit@ovn.org>
format_SAMPLE(const struct ofpact_sample *a, struct ds *s) { ds_put_format(s, "%ssample(%s%sprobability=%s%"PRIu16 ",%scollector_set_id=%s%"PRIu32 ",%sobs_domain_id=%s%"PRIu32 ",%sobs_point_id=%s%"PRIu32, colors.paren, colors.end, colors.param, colors.end, a->probability, colors.param, colors.end, a->collector_set_id, colors.param, colors.end, a->obs_domain_id, colors.param, colors.end, a->obs_point_id); if (a->sampling_port != OFPP_NONE) { ds_put_format(s, ",%ssampling_port=%s%"PRIu32, colors.param, colors.end, a->sampling_port); } if (a->direction == NX_ACTION_SAMPLE_INGRESS) { ds_put_format(s, ",%singress%s", colors.param, colors.end); } else if (a->direction == NX_ACTION_SAMPLE_EGRESS) { ds_put_format(s, ",%segress%s", colors.param, colors.end); } ds_put_format(s, "%s)%s", colors.paren, colors.end); }
format_SAMPLE(const struct ofpact_sample *a, struct ds *s) { ds_put_format(s, "%ssample(%s%sprobability=%s%"PRIu16 ",%scollector_set_id=%s%"PRIu32 ",%sobs_domain_id=%s%"PRIu32 ",%sobs_point_id=%s%"PRIu32, colors.paren, colors.end, colors.param, colors.end, a->probability, colors.param, colors.end, a->collector_set_id, colors.param, colors.end, a->obs_domain_id, colors.param, colors.end, a->obs_point_id); if (a->sampling_port != OFPP_NONE) { ds_put_format(s, ",%ssampling_port=%s%"PRIu32, colors.param, colors.end, a->sampling_port); } if (a->direction == NX_ACTION_SAMPLE_INGRESS) { ds_put_format(s, ",%singress%s", colors.param, colors.end); } else if (a->direction == NX_ACTION_SAMPLE_EGRESS) { ds_put_format(s, ",%segress%s", colors.param, colors.end); } ds_put_format(s, "%s)%s", colors.paren, colors.end); }
C
ovs
0
CVE-2019-11360
https://www.cvedetails.com/cve/CVE-2019-11360/
CWE-119
https://git.netfilter.org/iptables/commit/iptables/xshared.c?id=2ae1099a42e6a0f06de305ca13a842ac83d4683e
2ae1099a42e6a0f06de305ca13a842ac83d4683e
null
static int xtables_lock(int wait, struct timeval *wait_interval) { struct timeval time_left, wait_time; int fd, i = 0; time_left.tv_sec = wait; time_left.tv_usec = 0; fd = open(XT_LOCK_NAME, O_CREAT, 0600); if (fd < 0) { fprintf(stderr, "Fatal: can't open lock file %s: %s\n", XT_LOCK_NAME, strerror(errno)); return XT_LOCK_FAILED; } if (wait == -1) { if (flock(fd, LOCK_EX) == 0) return fd; fprintf(stderr, "Can't lock %s: %s\n", XT_LOCK_NAME, strerror(errno)); return XT_LOCK_BUSY; } while (1) { if (flock(fd, LOCK_EX | LOCK_NB) == 0) return fd; else if (timercmp(&time_left, wait_interval, <)) return XT_LOCK_BUSY; if (++i % 10 == 0) { fprintf(stderr, "Another app is currently holding the xtables lock; " "still %lds %ldus time ahead to have a chance to grab the lock...\n", time_left.tv_sec, time_left.tv_usec); } wait_time = *wait_interval; select(0, NULL, NULL, NULL, &wait_time); timersub(&time_left, wait_interval, &time_left); } }
static int xtables_lock(int wait, struct timeval *wait_interval) { struct timeval time_left, wait_time; int fd, i = 0; time_left.tv_sec = wait; time_left.tv_usec = 0; fd = open(XT_LOCK_NAME, O_CREAT, 0600); if (fd < 0) { fprintf(stderr, "Fatal: can't open lock file %s: %s\n", XT_LOCK_NAME, strerror(errno)); return XT_LOCK_FAILED; } if (wait == -1) { if (flock(fd, LOCK_EX) == 0) return fd; fprintf(stderr, "Can't lock %s: %s\n", XT_LOCK_NAME, strerror(errno)); return XT_LOCK_BUSY; } while (1) { if (flock(fd, LOCK_EX | LOCK_NB) == 0) return fd; else if (timercmp(&time_left, wait_interval, <)) return XT_LOCK_BUSY; if (++i % 10 == 0) { fprintf(stderr, "Another app is currently holding the xtables lock; " "still %lds %ldus time ahead to have a chance to grab the lock...\n", time_left.tv_sec, time_left.tv_usec); } wait_time = *wait_interval; select(0, NULL, NULL, NULL, &wait_time); timersub(&time_left, wait_interval, &time_left); } }
C
netfilter
0
CVE-2016-6515
https://www.cvedetails.com/cve/CVE-2016-6515/
CWE-20
https://github.com/openssh/openssh-portable/commit/fcd135c9df440bcd2d5870405ad3311743d78d97
fcd135c9df440bcd2d5870405ad3311743d78d97
upstream commit Skip passwords longer than 1k in length so clients can't easily DoS sshd by sending very long passwords, causing it to spend CPU hashing them. feedback djm@, ok markus@. Brought to our attention by tomas.kuthan at oracle.com, shilei-c at 360.cn and coredump at autistici.org Upstream-ID: d0af7d4a2190b63ba1d38eec502bc4be0be9e333
warn_expiry(Authctxt *authctxt, auth_session_t *as) { char buf[256]; quad_t pwtimeleft, actimeleft, daysleft, pwwarntime, acwarntime; pwwarntime = acwarntime = TWO_WEEKS; pwtimeleft = auth_check_change(as); actimeleft = auth_check_expire(as); #ifdef HAVE_LOGIN_CAP if (authctxt->valid) { pwwarntime = login_getcaptime(lc, "password-warn", TWO_WEEKS, TWO_WEEKS); acwarntime = login_getcaptime(lc, "expire-warn", TWO_WEEKS, TWO_WEEKS); } #endif if (pwtimeleft != 0 && pwtimeleft < pwwarntime) { daysleft = pwtimeleft / DAY + 1; snprintf(buf, sizeof(buf), "Your password will expire in %lld day%s.\n", daysleft, daysleft == 1 ? "" : "s"); buffer_append(&loginmsg, buf, strlen(buf)); } if (actimeleft != 0 && actimeleft < acwarntime) { daysleft = actimeleft / DAY + 1; snprintf(buf, sizeof(buf), "Your account will expire in %lld day%s.\n", daysleft, daysleft == 1 ? "" : "s"); buffer_append(&loginmsg, buf, strlen(buf)); } }
warn_expiry(Authctxt *authctxt, auth_session_t *as) { char buf[256]; quad_t pwtimeleft, actimeleft, daysleft, pwwarntime, acwarntime; pwwarntime = acwarntime = TWO_WEEKS; pwtimeleft = auth_check_change(as); actimeleft = auth_check_expire(as); #ifdef HAVE_LOGIN_CAP if (authctxt->valid) { pwwarntime = login_getcaptime(lc, "password-warn", TWO_WEEKS, TWO_WEEKS); acwarntime = login_getcaptime(lc, "expire-warn", TWO_WEEKS, TWO_WEEKS); } #endif if (pwtimeleft != 0 && pwtimeleft < pwwarntime) { daysleft = pwtimeleft / DAY + 1; snprintf(buf, sizeof(buf), "Your password will expire in %lld day%s.\n", daysleft, daysleft == 1 ? "" : "s"); buffer_append(&loginmsg, buf, strlen(buf)); } if (actimeleft != 0 && actimeleft < acwarntime) { daysleft = actimeleft / DAY + 1; snprintf(buf, sizeof(buf), "Your account will expire in %lld day%s.\n", daysleft, daysleft == 1 ? "" : "s"); buffer_append(&loginmsg, buf, strlen(buf)); } }
C
openssh-portable
0
CVE-2013-1790
https://www.cvedetails.com/cve/CVE-2013-1790/
CWE-119
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=b1026b5978c385328f2a15a2185c599a563edf91
b1026b5978c385328f2a15a2185c599a563edf91
null
void RGBGrayEncoder::reset() { str->reset(); bufPtr = bufEnd = buf; eof = gFalse; }
void RGBGrayEncoder::reset() { str->reset(); bufPtr = bufEnd = buf; eof = gFalse; }
CPP
poppler
0
CVE-2013-6636
https://www.cvedetails.com/cve/CVE-2013-6636/
CWE-20
https://github.com/chromium/chromium/commit/5cfe3023574666663d970ce48cdbc8ed15ce61d9
5cfe3023574666663d970ce48cdbc8ed15ce61d9
Clear out some minor TODOs. BUG=none Review URL: https://codereview.chromium.org/1047063002 Cr-Commit-Position: refs/heads/master@{#322959}
ValidityMessages::ValidityMessages() : default_message_(ValidityMessage(base::string16(), false)) {}
ValidityMessages::ValidityMessages() : default_message_(ValidityMessage(base::string16(), false)) {}
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/aac449e7154720b895ff1e7f3497c2ce95ae1a5a
aac449e7154720b895ff1e7f3497c2ce95ae1a5a
POSIX: make sure that we never pass directory descriptors into the sandbox. BUG=43304 http://codereview.chromium.org/2733011/show git-svn-id: svn://svn.chromium.org/chrome/trunk/src@49446 0039d316-1c4b-4281-b951-d872f2087c98
int Channel::ChannelImpl::GetClientFileDescriptor() const { return client_pipe_; }
int Channel::ChannelImpl::GetClientFileDescriptor() const { return client_pipe_; }
C
Chrome
0
CVE-2018-6124
https://www.cvedetails.com/cve/CVE-2018-6124/
null
https://github.com/chromium/chromium/commit/7712d138374a92c4d2f3b05cdc86d1a7a523702b
7712d138374a92c4d2f3b05cdc86d1a7a523702b
ReadableStreamBytesConsumer should check read results ReadableStreamBytesConsumer expected that the results from ReadableStreamReaderDefaultRead should be Promise<Object> because that is provided from ReadableStream provided by blink, but it's possible to inject arbitrary values with the promise assimilation. This CL adds additional checks for such injection. Bug: 840320 Change-Id: I7b3c6a8bfcf563dd860b133ff0295dd7a5d5fea5 Reviewed-on: https://chromium-review.googlesource.com/1049413 Commit-Queue: Yutaka Hirano <yhirano@chromium.org> Reviewed-by: Adam Rice <ricea@chromium.org> Cr-Commit-Position: refs/heads/master@{#556751}
BytesConsumer::PublicState ReadableStreamBytesConsumer::GetPublicState() const { return state_; }
BytesConsumer::PublicState ReadableStreamBytesConsumer::GetPublicState() const { return state_; }
C
Chrome
0
CVE-2018-1000037
https://www.cvedetails.com/cve/CVE-2018-1000037/
CWE-20
http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=8a3257b01faa899dd9b5e35c6bb3403cd709c371;hp=de39f005f12a1afc6973c1f5cec362d6545f70cb
8a3257b01faa899dd9b5e35c6bb3403cd709c371
null
pdf_tos_newline(pdf_text_object_state *tos, float leading) { fz_pre_translate(&tos->tlm, 0, -leading); tos->tm = tos->tlm; }
pdf_tos_newline(pdf_text_object_state *tos, float leading) { fz_pre_translate(&tos->tlm, 0, -leading); tos->tm = tos->tlm; }
C
ghostscript
0
CVE-2016-5216
https://www.cvedetails.com/cve/CVE-2016-5216/
CWE-416
https://github.com/chromium/chromium/commit/bf6a6765d44b09c64b8c75d749efb84742a250e7
bf6a6765d44b09c64b8c75d749efb84742a250e7
[pdf] Defer page unloading in JS callback. One of the callbacks from PDFium JavaScript into the embedder is to get the current page number. In Chromium, this will trigger a call to CalculateMostVisiblePage that method will determine the visible pages and unload any non-visible pages. But, if the originating JS is on a non-visible page we'll delete the page and annotations associated with that page. This will cause issues as we are currently working with those objects when the JavaScript returns. This Cl defers the page unloading triggered by getting the most visible page until the next event is handled by the Chromium embedder. BUG=chromium:653090 Review-Url: https://codereview.chromium.org/2418533002 Cr-Commit-Position: refs/heads/master@{#424781}
void PDFiumEngine::GetPasswordAndLoad() { getting_password_ = true; DCHECK(!doc_ && FPDF_GetLastError() == FPDF_ERR_PASSWORD); client_->GetDocumentPassword(password_factory_.NewCallbackWithOutput( &PDFiumEngine::OnGetPasswordComplete)); }
void PDFiumEngine::GetPasswordAndLoad() { getting_password_ = true; DCHECK(!doc_ && FPDF_GetLastError() == FPDF_ERR_PASSWORD); client_->GetDocumentPassword(password_factory_.NewCallbackWithOutput( &PDFiumEngine::OnGetPasswordComplete)); }
C
Chrome
0
CVE-2017-7616
https://www.cvedetails.com/cve/CVE-2017-7616/
CWE-388
https://github.com/torvalds/linux/commit/cf01fb9985e8deb25ccf0ea54d916b8871ae0e62
cf01fb9985e8deb25ccf0ea54d916b8871ae0e62
mm/mempolicy.c: fix error handling in set_mempolicy and mbind. In the case that compat_get_bitmap fails we do not want to copy the bitmap to the user as it will contain uninitialized stack data and leak sensitive data. Signed-off-by: Chris Salls <salls@cs.ucsb.edu> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) return -EFAULT; nm = compat_alloc_user_space(alloc_size); if (copy_to_user(nm, nodes_addr(bm), alloc_size)) return -EFAULT; } return sys_mbind(start, len, mode, nm, nr_bits+1, flags); }
COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); nm = compat_alloc_user_space(alloc_size); err |= copy_to_user(nm, nodes_addr(bm), alloc_size); } if (err) return -EFAULT; return sys_mbind(start, len, mode, nm, nr_bits+1, flags); }
C
linux
1
CVE-2018-6560
https://www.cvedetails.com/cve/CVE-2018-6560/
CWE-436
https://github.com/flatpak/flatpak/commit/52346bf187b5a7f1c0fe9075b328b7ad6abe78f6
52346bf187b5a7f1c0fe9075b328b7ad6abe78f6
Fix vulnerability in dbus proxy During the authentication all client data is directly forwarded to the dbus daemon as is, until we detect the BEGIN command after which we start filtering the binary dbus protocol. Unfortunately the detection of the BEGIN command in the proxy did not exactly match the detection in the dbus daemon. A BEGIN followed by a space or tab was considered ok in the daemon but not by the proxy. This could be exploited to send arbitrary dbus messages to the host, which can be used to break out of the sandbox. This was noticed by Gabriel Campana of The Google Security Team. This fix makes the detection of the authentication phase end match the dbus code. In addition we duplicate the authentication line validation from dbus, which includes ensuring all data is ASCII, and limiting the size of a line to 16k. In fact, we add some extra stringent checks, disallowing ASCII control chars and requiring that auth lines start with a capital letter.
get_bool_reply_for_roundtrip (FlatpakProxyClient *client, Header *header, gboolean val) { Buffer *ping_buffer = get_ping_buffer_for_header (header); GDBusMessage *reply; reply = get_bool_reply_for_header (client, header, val); g_hash_table_replace (client->rewrite_reply, GINT_TO_POINTER (header->serial), reply); return ping_buffer; }
get_bool_reply_for_roundtrip (FlatpakProxyClient *client, Header *header, gboolean val) { Buffer *ping_buffer = get_ping_buffer_for_header (header); GDBusMessage *reply; reply = get_bool_reply_for_header (client, header, val); g_hash_table_replace (client->rewrite_reply, GINT_TO_POINTER (header->serial), reply); return ping_buffer; }
C
flatpak
0
null
null
null
https://github.com/chromium/chromium/commit/ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
Clean up calls like "gfx::Rect(0, 0, size().width(), size().height()". The caller can use the much shorter "gfx::Rect(size())", since gfx::Rect has a constructor that just takes a Size. BUG=none TEST=none Review URL: http://codereview.chromium.org/2204001 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48283 0039d316-1c4b-4281-b951-d872f2087c98
void WebPluginDelegatePepper::RenderViewFlushedPaint() { Graphics2DMap::iterator iter2d(&graphic2d_contexts_); while (!iter2d.IsAtEnd()) { iter2d.GetCurrentValue()->RenderViewFlushedPaint(); iter2d.Advance(); } }
void WebPluginDelegatePepper::RenderViewFlushedPaint() { Graphics2DMap::iterator iter2d(&graphic2d_contexts_); while (!iter2d.IsAtEnd()) { iter2d.GetCurrentValue()->RenderViewFlushedPaint(); iter2d.Advance(); } }
C
Chrome
0
CVE-2011-4621
https://www.cvedetails.com/cve/CVE-2011-4621/
null
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the waking a sleeper before it sucessfully deschedules case by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <efault@gmx.de> Reported-by: Bjoern B. Brandenburg <bbb.lst@gmail.com> Tested-by: Yong Zhang <yong.zhang0@gmail.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: stable@kernel.org LKML-Reference: <1291802742.1417.9.camel@marge.simson.net> Signed-off-by: Ingo Molnar <mingo@elte.hu>
static __init int sched_init_debug(void) { debugfs_create_file("sched_features", 0644, NULL, NULL, &sched_feat_fops); return 0; }
static __init int sched_init_debug(void) { debugfs_create_file("sched_features", 0644, NULL, NULL, &sched_feat_fops); return 0; }
C
linux
0
CVE-2016-5354
https://www.cvedetails.com/cve/CVE-2016-5354/
CWE-476
https://github.com/wireshark/wireshark/commit/2cb5985bf47bdc8bea78d28483ed224abdd33dc6
2cb5985bf47bdc8bea78d28483ed224abdd33dc6
Make class "type" for USB conversations. USB dissectors can't assume that only their class type has been passed around in the conversation. Make explicit check that class type expected matches the dissector and stop/prevent dissection if there isn't a match. Bug: 12356 Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209 Reviewed-on: https://code.wireshark.org/review/15212 Petri-Dish: Michael Mann <mmann78@netscape.net> Reviewed-by: Martin Kaiser <wireshark@kaiser.cx> Petri-Dish: Martin Kaiser <wireshark@kaiser.cx> Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org> Reviewed-by: Michael Mann <mmann78@netscape.net>
dissect_u3v_descriptors(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, void *data _U_) { guint8 type; gint offset = 0; proto_item * ti; proto_tree * sub_tree; guint32 version; /* The descriptor must at least have a length and type field. */ if (tvb_reported_length(tvb) < 2) { return 0; } /* skip len */ type = tvb_get_guint8(tvb, 1); /* Check for U3V device info descriptor. */ if (type != DESCRIPTOR_TYPE_U3V_INTERFACE) { return 0; } ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor); /* bLength */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bLength, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bDescriptorType */ ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bDescriptorType, tvb, offset, 1, ENC_LITTLE_ENDIAN); proto_item_append_text(ti, " (U3V INTERFACE)"); offset++; /* bDescriptorSubtype */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bDescriptorSubtype, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bGenCPVersion */ if (!tvb_bytes_exist(tvb, offset, 4)) { /* Version not completely in buffer -> break dissection here. */ return offset; } version = tvb_get_letohl(tvb, offset); ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bGenCPVersion, tvb, offset, 4, ENC_NA); proto_item_append_text(ti, ": %u.%u", version >> 16, version & 0xFFFF); sub_tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor_gencp_version); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bGenCPVersion_minor, tvb, offset, 4, ENC_LITTLE_ENDIAN); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bGenCPVersion_major, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* bU3VVersion */ if (!tvb_bytes_exist(tvb, offset, 4)) { /* Version not completely in buffer -> break dissection here. */ return offset; } version = tvb_get_letohl(tvb, offset); ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bU3VVersion, tvb, offset, 4, ENC_NA); proto_item_append_text(ti, ": %u.%u", version >> 16, version & 0xFFFF); sub_tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor_u3v_version); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bU3VVersion_minor, tvb, offset, 4, ENC_LITTLE_ENDIAN); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bU3VVersion_major, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* iDeviceGUID */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iDeviceGUID, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iVendorName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iVendorName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iModelName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iModelName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iFamilyName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iFamilyName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iDeviceVersion */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iDeviceVersion, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iManufacturerInfo */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iManufacturerInfo, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iSerialNumber */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iSerialNumber, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iUserDefinedName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iUserDefinedName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bmSpeedSupport */ proto_tree_add_bitmask(tree, tvb, offset, hf_u3v_device_info_descriptor_bmSpeedSupport, ett_u3v_device_info_descriptor_speed_support, speed_support_fields, ENC_LITTLE_ENDIAN); offset++; return offset; }
dissect_u3v_descriptors(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, void *data _U_) { guint8 type; gint offset = 0; proto_item * ti; proto_tree * sub_tree; guint32 version; /* The descriptor must at least have a length and type field. */ if (tvb_reported_length(tvb) < 2) { return 0; } /* skip len */ type = tvb_get_guint8(tvb, 1); /* Check for U3V device info descriptor. */ if (type != DESCRIPTOR_TYPE_U3V_INTERFACE) { return 0; } ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor); /* bLength */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bLength, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bDescriptorType */ ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bDescriptorType, tvb, offset, 1, ENC_LITTLE_ENDIAN); proto_item_append_text(ti, " (U3V INTERFACE)"); offset++; /* bDescriptorSubtype */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bDescriptorSubtype, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bGenCPVersion */ if (!tvb_bytes_exist(tvb, offset, 4)) { /* Version not completely in buffer -> break dissection here. */ return offset; } version = tvb_get_letohl(tvb, offset); ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bGenCPVersion, tvb, offset, 4, ENC_NA); proto_item_append_text(ti, ": %u.%u", version >> 16, version & 0xFFFF); sub_tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor_gencp_version); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bGenCPVersion_minor, tvb, offset, 4, ENC_LITTLE_ENDIAN); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bGenCPVersion_major, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* bU3VVersion */ if (!tvb_bytes_exist(tvb, offset, 4)) { /* Version not completely in buffer -> break dissection here. */ return offset; } version = tvb_get_letohl(tvb, offset); ti = proto_tree_add_item(tree, hf_u3v_device_info_descriptor_bU3VVersion, tvb, offset, 4, ENC_NA); proto_item_append_text(ti, ": %u.%u", version >> 16, version & 0xFFFF); sub_tree = proto_item_add_subtree(ti, ett_u3v_device_info_descriptor_u3v_version); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bU3VVersion_minor, tvb, offset, 4, ENC_LITTLE_ENDIAN); proto_tree_add_item(sub_tree, hf_u3v_device_info_descriptor_bU3VVersion_major, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* iDeviceGUID */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iDeviceGUID, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iVendorName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iVendorName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iModelName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iModelName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iFamilyName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iFamilyName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iDeviceVersion */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iDeviceVersion, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iManufacturerInfo */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iManufacturerInfo, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iSerialNumber */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iSerialNumber, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* iUserDefinedName */ proto_tree_add_item(tree, hf_u3v_device_info_descriptor_iUserDefinedName, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; /* bmSpeedSupport */ proto_tree_add_bitmask(tree, tvb, offset, hf_u3v_device_info_descriptor_bmSpeedSupport, ett_u3v_device_info_descriptor_speed_support, speed_support_fields, ENC_LITTLE_ENDIAN); offset++; return offset; }
C
wireshark
0
CVE-2015-1299
https://www.cvedetails.com/cve/CVE-2015-1299/
null
https://github.com/chromium/chromium/commit/1810bb5cec9026c64fc34fbbb8fafd01263241d2
1810bb5cec9026c64fc34fbbb8fafd01263241d2
Disable forwarding tasks to the Blink scheduler Disable forwarding tasks to the Blink scheduler to avoid some regressions which it has introduced. BUG=391005,415758,415478,412714,416362,416827,417608 TBR=jamesr@chromium.org Review URL: https://codereview.chromium.org/609483002 Cr-Commit-Position: refs/heads/master@{#296916}
scoped_ptr<gfx::GpuMemoryBuffer> RenderThreadImpl::AllocateGpuMemoryBuffer( size_t width, size_t height, unsigned internalformat, unsigned usage) { TRACE_EVENT0("renderer", "RenderThreadImpl::AllocateGpuMemoryBuffer"); DCHECK(allocate_gpu_memory_buffer_thread_checker_.CalledOnValidThread()); if (!GpuMemoryBufferImpl::IsFormatValid(internalformat)) return scoped_ptr<gfx::GpuMemoryBuffer>(); gfx::GpuMemoryBufferHandle handle; bool success; IPC::Message* message = new ChildProcessHostMsg_SyncAllocateGpuMemoryBuffer( width, height, internalformat, usage, &handle); if (base::MessageLoop::current() == message_loop()) success = ChildThread::Send(message); else success = sync_message_filter()->Send(message); if (!success) return scoped_ptr<gfx::GpuMemoryBuffer>(); scoped_ptr<GpuMemoryBufferImpl> buffer(GpuMemoryBufferImpl::CreateFromHandle( handle, gfx::Size(width, height), internalformat, base::Bind(&DeletedGpuMemoryBuffer, make_scoped_refptr(thread_safe_sender()), handle.type, handle.global_id))); if (!buffer) { thread_safe_sender()->Send(new ChildProcessHostMsg_DeletedGpuMemoryBuffer( handle.type, handle.global_id)); return scoped_ptr<gfx::GpuMemoryBuffer>(); } return buffer.PassAs<gfx::GpuMemoryBuffer>(); }
scoped_ptr<gfx::GpuMemoryBuffer> RenderThreadImpl::AllocateGpuMemoryBuffer( size_t width, size_t height, unsigned internalformat, unsigned usage) { TRACE_EVENT0("renderer", "RenderThreadImpl::AllocateGpuMemoryBuffer"); DCHECK(allocate_gpu_memory_buffer_thread_checker_.CalledOnValidThread()); if (!GpuMemoryBufferImpl::IsFormatValid(internalformat)) return scoped_ptr<gfx::GpuMemoryBuffer>(); gfx::GpuMemoryBufferHandle handle; bool success; IPC::Message* message = new ChildProcessHostMsg_SyncAllocateGpuMemoryBuffer( width, height, internalformat, usage, &handle); if (base::MessageLoop::current() == message_loop()) success = ChildThread::Send(message); else success = sync_message_filter()->Send(message); if (!success) return scoped_ptr<gfx::GpuMemoryBuffer>(); scoped_ptr<GpuMemoryBufferImpl> buffer(GpuMemoryBufferImpl::CreateFromHandle( handle, gfx::Size(width, height), internalformat, base::Bind(&DeletedGpuMemoryBuffer, make_scoped_refptr(thread_safe_sender()), handle.type, handle.global_id))); if (!buffer) { thread_safe_sender()->Send(new ChildProcessHostMsg_DeletedGpuMemoryBuffer( handle.type, handle.global_id)); return scoped_ptr<gfx::GpuMemoryBuffer>(); } return buffer.PassAs<gfx::GpuMemoryBuffer>(); }
C
Chrome
0
CVE-2011-3209
https://www.cvedetails.com/cve/CVE-2011-3209/
CWE-189
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
f8bd2258e2d520dff28c855658bd24bdafb5102d
remove div_long_long_rem x86 is the only arch right now, which provides an optimized for div_long_long_rem and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little akward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <zippel@linux-m68k.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: john stultz <johnstul@us.ibm.com> Cc: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { struct track *p; if (s->offset) p = object + s->offset + sizeof(void *); else p = object + s->inuse; return p + alloc; }
static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { struct track *p; if (s->offset) p = object + s->offset + sizeof(void *); else p = object + s->inuse; return p + alloc; }
C
linux
0
CVE-2014-3160
https://www.cvedetails.com/cve/CVE-2014-3160/
CWE-264
https://github.com/chromium/chromium/commit/ee281f7cac9df44fe241a37f188b28be8845ded0
ee281f7cac9df44fe241a37f188b28be8845ded0
Enforce SVG image security rules SVG images have unique security rules that prevent them from loading any external resources. This patch enforces these rules in ResourceFetcher::canRequest for all non-data-uri resources. This locks down our SVG resource handling and fixes two security bugs. In the case of SVG images that reference other images, we had a bug where a cached subresource would be used directly from the cache. This has been fixed because the canRequest check occurs before we use cached resources. In the case of SVG images that use CSS imports, we had a bug where imports were blindly requested. This has been fixed by stopping all non-data-uri requests in SVG images. With this patch we now match Gecko's behavior on both testcases. BUG=380885, 382296 Review URL: https://codereview.chromium.org/320763002 git-svn-id: svn://svn.chromium.org/blink/trunk@176084 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void ResourceFetcher::garbageCollectDocumentResourcesTimerFired(Timer<ResourceFetcher>* timer) { ASSERT_UNUSED(timer, timer == &m_garbageCollectDocumentResourcesTimer); garbageCollectDocumentResources(); }
void ResourceFetcher::garbageCollectDocumentResourcesTimerFired(Timer<ResourceFetcher>* timer) { ASSERT_UNUSED(timer, timer == &m_garbageCollectDocumentResourcesTimer); garbageCollectDocumentResources(); }
C
Chrome
0
CVE-2014-8133
https://www.cvedetails.com/cve/CVE-2014-8133/
CWE-264
https://github.com/torvalds/linux/commit/41bdc78544b8a93a9c6814b8bbbfef966272abbe
41bdc78544b8a93a9c6814b8bbbfef966272abbe
x86/tls: Validate TLS entries to protect espfix Installing a 16-bit RW data segment into the GDT defeats espfix. AFAICT this will not affect glibc, Wine, or dosemu at all. Signed-off-by: Andy Lutomirski <luto@amacapital.net> Acked-by: H. Peter Anvin <hpa@zytor.com> Cc: stable@vger.kernel.org Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: security@kernel.org <security@kernel.org> Cc: Willy Tarreau <w@1wt.eu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
int regset_tls_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct desc_struct *tls; if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; pos /= sizeof(struct user_desc); count /= sizeof(struct user_desc); tls = &target->thread.tls_array[pos]; if (kbuf) { struct user_desc *info = kbuf; while (count-- > 0) fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++, tls++); } else { struct user_desc __user *u_info = ubuf; while (count-- > 0) { struct user_desc info; fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++); if (__copy_to_user(u_info++, &info, sizeof(info))) return -EFAULT; } } return 0; }
int regset_tls_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { const struct desc_struct *tls; if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; pos /= sizeof(struct user_desc); count /= sizeof(struct user_desc); tls = &target->thread.tls_array[pos]; if (kbuf) { struct user_desc *info = kbuf; while (count-- > 0) fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++, tls++); } else { struct user_desc __user *u_info = ubuf; while (count-- > 0) { struct user_desc info; fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++); if (__copy_to_user(u_info++, &info, sizeof(info))) return -EFAULT; } } return 0; }
C
linux
0
CVE-2011-3103
https://www.cvedetails.com/cve/CVE-2011-3103/
CWE-399
https://github.com/chromium/chromium/commit/b2dfe7c175fb21263f06eb586f1ed235482a3281
b2dfe7c175fb21263f06eb586f1ed235482a3281
[EFL] fast/frames/frame-crash-with-page-cache.html is crashing https://bugs.webkit.org/show_bug.cgi?id=85879 Patch by Mikhail Pozdnyakov <mikhail.pozdnyakov@intel.com> on 2012-05-17 Reviewed by Noam Rosenthal. Source/WebKit/efl: _ewk_frame_smart_del() is considering now that the frame can be present in cache. loader()->detachFromParent() is only applied for the main frame. loader()->cancelAndClear() is not used anymore. * ewk/ewk_frame.cpp: (_ewk_frame_smart_del): LayoutTests: * platform/efl/test_expectations.txt: Removed fast/frames/frame-crash-with-page-cache.html. git-svn-id: svn://svn.chromium.org/blink/trunk@117409 bbb929c8-8fbe-4397-9dbb-9b2b20218538
Eina_Bool ewk_frame_page_zoom_set(Evas_Object* ewkFrame, float pageZoomFactor) { EWK_FRAME_SD_GET_OR_RETURN(ewkFrame, smartData, false); EINA_SAFETY_ON_NULL_RETURN_VAL(smartData->frame, false); smartData->frame->setPageZoomFactor(pageZoomFactor); return true; }
Eina_Bool ewk_frame_page_zoom_set(Evas_Object* ewkFrame, float pageZoomFactor) { EWK_FRAME_SD_GET_OR_RETURN(ewkFrame, smartData, false); EINA_SAFETY_ON_NULL_RETURN_VAL(smartData->frame, false); smartData->frame->setPageZoomFactor(pageZoomFactor); return true; }
C
Chrome
0
CVE-2015-0293
https://www.cvedetails.com/cve/CVE-2015-0293/
CWE-20
https://git.openssl.org/?p=openssl.git;a=commit;h=86f8fb0e344d62454f8daf3e15236b2b59210756
86f8fb0e344d62454f8daf3e15236b2b59210756
null
int ssl2_generate_key_material(SSL *s) { unsigned int i; EVP_MD_CTX ctx; unsigned char *km; unsigned char c = '0'; const EVP_MD *md5; int md_size; md5 = EVP_md5(); # ifdef CHARSET_EBCDIC c = os_toascii['0']; /* Must be an ASCII '0', not EBCDIC '0', see * SSLv2 docu */ # endif EVP_MD_CTX_init(&ctx); km = s->s2->key_material; if (s->session->master_key_length < 0 || s->session->master_key_length > (int)sizeof(s->session->master_key)) { SSLerr(SSL_F_SSL2_GENERATE_KEY_MATERIAL, ERR_R_INTERNAL_ERROR); return 0; } md_size = EVP_MD_size(md5); if (md_size < 0) return 0; for (i = 0; i < s->s2->key_material_length; i += md_size) { if (((km - s->s2->key_material) + md_size) > (int)sizeof(s->s2->key_material)) { /* * EVP_DigestFinal_ex() below would write beyond buffer */ SSLerr(SSL_F_SSL2_GENERATE_KEY_MATERIAL, ERR_R_INTERNAL_ERROR); return 0; } EVP_DigestInit_ex(&ctx, md5, NULL); OPENSSL_assert(s->session->master_key_length >= 0 && s->session->master_key_length <= (int)sizeof(s->session->master_key)); EVP_DigestUpdate(&ctx, s->session->master_key, s->session->master_key_length); EVP_DigestUpdate(&ctx, &c, 1); c++; EVP_DigestUpdate(&ctx, s->s2->challenge, s->s2->challenge_length); EVP_DigestUpdate(&ctx, s->s2->conn_id, s->s2->conn_id_length); EVP_DigestFinal_ex(&ctx, km, NULL); km += md_size; } EVP_MD_CTX_cleanup(&ctx); return 1; }
int ssl2_generate_key_material(SSL *s) { unsigned int i; EVP_MD_CTX ctx; unsigned char *km; unsigned char c = '0'; const EVP_MD *md5; int md_size; md5 = EVP_md5(); # ifdef CHARSET_EBCDIC c = os_toascii['0']; /* Must be an ASCII '0', not EBCDIC '0', see * SSLv2 docu */ # endif EVP_MD_CTX_init(&ctx); km = s->s2->key_material; if (s->session->master_key_length < 0 || s->session->master_key_length > (int)sizeof(s->session->master_key)) { SSLerr(SSL_F_SSL2_GENERATE_KEY_MATERIAL, ERR_R_INTERNAL_ERROR); return 0; } md_size = EVP_MD_size(md5); if (md_size < 0) return 0; for (i = 0; i < s->s2->key_material_length; i += md_size) { if (((km - s->s2->key_material) + md_size) > (int)sizeof(s->s2->key_material)) { /* * EVP_DigestFinal_ex() below would write beyond buffer */ SSLerr(SSL_F_SSL2_GENERATE_KEY_MATERIAL, ERR_R_INTERNAL_ERROR); return 0; } EVP_DigestInit_ex(&ctx, md5, NULL); OPENSSL_assert(s->session->master_key_length >= 0 && s->session->master_key_length < (int)sizeof(s->session->master_key)); EVP_DigestUpdate(&ctx, s->session->master_key, s->session->master_key_length); EVP_DigestUpdate(&ctx, &c, 1); c++; EVP_DigestUpdate(&ctx, s->s2->challenge, s->s2->challenge_length); EVP_DigestUpdate(&ctx, s->s2->conn_id, s->s2->conn_id_length); EVP_DigestFinal_ex(&ctx, km, NULL); km += md_size; } EVP_MD_CTX_cleanup(&ctx); return 1; }
C
openssl
1
null
null
null
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
5041f984669fe3a989a84c348eb838c8f7233f6b
AutoFill: Release the cached frame when we receive the frameDestroyed() message from WebKit. BUG=48857 TEST=none Review URL: http://codereview.chromium.org/3173005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
WebKit::WebPlugin* RenderView::GetWebPluginFromPluginDocument() { return webview()->mainFrame()->document().to<WebPluginDocument>().plugin(); }
WebKit::WebPlugin* RenderView::GetWebPluginFromPluginDocument() { return webview()->mainFrame()->document().to<WebPluginDocument>().plugin(); }
C
Chrome
0
CVE-2017-15128
https://www.cvedetails.com/cve/CVE-2017-15128/
CWE-119
https://github.com/torvalds/linux/commit/1e3921471354244f70fe268586ff94a97a6dd4df
1e3921471354244f70fe268586ff94a97a6dd4df
userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size This oops: kernel BUG at fs/hugetlbfs/inode.c:484! RIP: remove_inode_hugepages+0x3d0/0x410 Call Trace: hugetlbfs_setattr+0xd9/0x130 notify_change+0x292/0x410 do_truncate+0x65/0xa0 do_sys_ftruncate.constprop.3+0x11a/0x180 SyS_ftruncate+0xe/0x10 tracesys+0xd9/0xde was caused by the lack of i_size check in hugetlb_mcopy_atomic_pte. mmap() can still succeed beyond the end of the i_size after vmtruncate zapped vmas in those ranges, but the faults must not succeed, and that includes UFFDIO_COPY. We could differentiate the retval to userland to represent a SIGBUS like a page fault would do (vs SIGSEGV), but it doesn't seem very useful and we'd need to pick a random retval as there's no meaningful syscall retval that would differentiate from SIGSEGV and SIGBUS, there's just -EFAULT. Link: http://lkml.kernel.org/r/20171016223914.2421-2-aarcange@redhat.com Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com> Cc: Mike Rapoport <rppt@linux.vnet.ibm.com> Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static inline gfp_t htlb_alloc_mask(struct hstate *h) { if (hugepages_treat_as_movable || hugepage_migration_supported(h)) return GFP_HIGHUSER_MOVABLE; else return GFP_HIGHUSER; }
static inline gfp_t htlb_alloc_mask(struct hstate *h) { if (hugepages_treat_as_movable || hugepage_migration_supported(h)) return GFP_HIGHUSER_MOVABLE; else return GFP_HIGHUSER; }
C
linux
0
CVE-2012-5136
https://www.cvedetails.com/cve/CVE-2012-5136/
CWE-20
https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502
401d30ef93030afbf7e81e53a11b68fc36194502
Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document The member is used only in Document, thus no reason to stay in SecurityContext. TEST=none BUG=none R=haraken@chromium.org, abarth, haraken, hayato Review URL: https://codereview.chromium.org/27615003 git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void Document::setVisuallyOrdered() { m_visuallyOrdered = true; if (renderer()) renderer()->style()->setRTLOrdering(VisualOrder); setNeedsStyleRecalc(); }
void Document::setVisuallyOrdered() { m_visuallyOrdered = true; if (renderer()) renderer()->style()->setRTLOrdering(VisualOrder); setNeedsStyleRecalc(); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/a5333583f14284a411abac2fef7caed889a8bba3
a5333583f14284a411abac2fef7caed889a8bba3
Wire InstallFinished and add some InstallEvent.waitUntil tests BUG=285976 TEST=content_browsertests:ServiceWorkerVersionBrowserTest.Install* Committed: https://src.chromium.org/viewvc/chrome?view=rev&revision=250804 Review URL: https://codereview.chromium.org/153553008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@250936 0039d316-1c4b-4281-b951-d872f2087c98
virtual void TearDownOnIOThread() {}
virtual void TearDownOnIOThread() {}
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
Clean up calls like "gfx::Rect(0, 0, size().width(), size().height()". The caller can use the much shorter "gfx::Rect(size())", since gfx::Rect has a constructor that just takes a Size. BUG=none TEST=none Review URL: http://codereview.chromium.org/2204001 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48283 0039d316-1c4b-4281-b951-d872f2087c98
bool WebPluginDelegateProxy::CreateLocalBitmap( std::vector<uint8>* memory, scoped_ptr<skia::PlatformCanvas>* canvas) { const size_t size = BitmapSizeForPluginRect(plugin_rect_); memory->resize(size); if (memory->size() != size) return false; canvas->reset(new skia::PlatformCanvas( plugin_rect_.width(), plugin_rect_.height(), true, &((*memory)[0]))); return true; }
bool WebPluginDelegateProxy::CreateLocalBitmap( std::vector<uint8>* memory, scoped_ptr<skia::PlatformCanvas>* canvas) { const size_t size = BitmapSizeForPluginRect(plugin_rect_); memory->resize(size); if (memory->size() != size) return false; canvas->reset(new skia::PlatformCanvas( plugin_rect_.width(), plugin_rect_.height(), true, &((*memory)[0]))); return true; }
C
Chrome
0
CVE-2015-1265
https://www.cvedetails.com/cve/CVE-2015-1265/
null
https://github.com/chromium/chromium/commit/8ea5693d5cf304e56174bb6b65412f04209904db
8ea5693d5cf304e56174bb6b65412f04209904db
Move Editor::Transpose() out of Editor class This patch moves |Editor::Transpose()| out of |Editor| class as preparation of expanding it into |ExecutTranspose()| in "EditorCommand.cpp" to make |Editor| class simpler for improving code health. Following patch will expand |Transpose()| into |ExecutTranspose()|. Bug: 672405 Change-Id: Icde253623f31813d2b4517c4da7d4798bd5fadf6 Reviewed-on: https://chromium-review.googlesource.com/583880 Reviewed-by: Xiaocheng Hu <xiaochengh@chromium.org> Commit-Queue: Yoshifumi Inoue <yosin@chromium.org> Cr-Commit-Position: refs/heads/master@{#489518}
static bool EnabledCut(LocalFrame& frame, Event*, EditorCommandSource source) { if (!CanWriteClipboard(frame, source)) return false; if (source == kCommandFromMenuOrKeyBinding && !frame.Selection().SelectionHasFocus()) return false; return frame.GetEditor().CanDHTMLCut() || frame.GetEditor().CanCut(); }
static bool EnabledCut(LocalFrame& frame, Event*, EditorCommandSource source) { if (!CanWriteClipboard(frame, source)) return false; if (source == kCommandFromMenuOrKeyBinding && !frame.Selection().SelectionHasFocus()) return false; return frame.GetEditor().CanDHTMLCut() || frame.GetEditor().CanCut(); }
C
Chrome
0
CVE-2019-12068
https://www.cvedetails.com/cve/CVE-2019-12068/
CWE-835
https://git.qemu.org/?p=qemu.git;a=commit;h=de594e47659029316bbf9391efb79da0a1a08e08
de594e47659029316bbf9391efb79da0a1a08e08
null
static void lsi_skip_msgbytes(LSIState *s, unsigned int n) { s->dnad += n; s->dbc -= n; }
static void lsi_skip_msgbytes(LSIState *s, unsigned int n) { s->dnad += n; s->dbc -= n; }
C
qemu
0
null
null
null
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
1161a49d663dd395bd639549c2dfe7324f847938
Don't populate URL data in WebDropData when dragging files. This is considered a potential security issue as well, since it leaks filesystem paths. BUG=332579 Review URL: https://codereview.chromium.org/135633002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
void TabStrip::Init() { set_id(VIEW_ID_TAB_STRIP); set_notify_enter_exit_on_child(true); newtab_button_bounds_.SetRect(0, 0, newtab_button_asset_width(), newtab_button_asset_height() + newtab_button_v_offset()); newtab_button_ = new NewTabButton(this, this); newtab_button_->SetTooltipText( l10n_util::GetStringUTF16(IDS_TOOLTIP_NEW_TAB)); newtab_button_->SetAccessibleName( l10n_util::GetStringUTF16(IDS_ACCNAME_NEWTAB)); newtab_button_->SetImageAlignment(views::ImageButton::ALIGN_LEFT, views::ImageButton::ALIGN_BOTTOM); AddChildView(newtab_button_); if (drop_indicator_width == 0) { gfx::ImageSkia* drop_image = GetDropArrowImage(true); drop_indicator_width = drop_image->width(); drop_indicator_height = drop_image->height(); } }
void TabStrip::Init() { set_id(VIEW_ID_TAB_STRIP); set_notify_enter_exit_on_child(true); newtab_button_bounds_.SetRect(0, 0, newtab_button_asset_width(), newtab_button_asset_height() + newtab_button_v_offset()); newtab_button_ = new NewTabButton(this, this); newtab_button_->SetTooltipText( l10n_util::GetStringUTF16(IDS_TOOLTIP_NEW_TAB)); newtab_button_->SetAccessibleName( l10n_util::GetStringUTF16(IDS_ACCNAME_NEWTAB)); newtab_button_->SetImageAlignment(views::ImageButton::ALIGN_LEFT, views::ImageButton::ALIGN_BOTTOM); AddChildView(newtab_button_); if (drop_indicator_width == 0) { gfx::ImageSkia* drop_image = GetDropArrowImage(true); drop_indicator_width = drop_image->width(); drop_indicator_height = drop_image->height(); } }
C
Chrome
0
CVE-2011-3091
https://www.cvedetails.com/cve/CVE-2011-3091/
CWE-399
https://github.com/chromium/chromium/commit/cc7cde43832b547cdab856fe1bedc9514ca38e13
cc7cde43832b547cdab856fe1bedc9514ca38e13
Add DCHECK to ensure IndexedDBDispatcher doesn't get re-created. This could happen if there are IDB objects that survive the call to didStopWorkerRunLoop. BUG=121734 TEST= Review URL: http://codereview.chromium.org/9999035 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131679 0039d316-1c4b-4281-b951-d872f2087c98
void IndexedDBDispatcher::OnMessageReceived(const IPC::Message& msg) { bool handled = true; IPC_BEGIN_MESSAGE_MAP(IndexedDBDispatcher, msg) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBCursor, OnSuccessOpenCursor) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessCursorContinue, OnSuccessCursorContinue) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessCursorPrefetch, OnSuccessCursorPrefetch) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBDatabase, OnSuccessIDBDatabase) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIndexedDBKey, OnSuccessIndexedDBKey) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBTransaction, OnSuccessIDBTransaction) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessStringList, OnSuccessStringList) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessSerializedScriptValue, OnSuccessSerializedScriptValue) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksError, OnError) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksBlocked, OnBlocked) IPC_MESSAGE_HANDLER(IndexedDBMsg_TransactionCallbacksAbort, OnAbort) IPC_MESSAGE_HANDLER(IndexedDBMsg_TransactionCallbacksComplete, OnComplete) IPC_MESSAGE_HANDLER(IndexedDBMsg_DatabaseCallbacksVersionChange, OnVersionChange) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() DCHECK(handled); }
void IndexedDBDispatcher::OnMessageReceived(const IPC::Message& msg) { bool handled = true; IPC_BEGIN_MESSAGE_MAP(IndexedDBDispatcher, msg) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBCursor, OnSuccessOpenCursor) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessCursorContinue, OnSuccessCursorContinue) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessCursorPrefetch, OnSuccessCursorPrefetch) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBDatabase, OnSuccessIDBDatabase) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIndexedDBKey, OnSuccessIndexedDBKey) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessIDBTransaction, OnSuccessIDBTransaction) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessStringList, OnSuccessStringList) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksSuccessSerializedScriptValue, OnSuccessSerializedScriptValue) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksError, OnError) IPC_MESSAGE_HANDLER(IndexedDBMsg_CallbacksBlocked, OnBlocked) IPC_MESSAGE_HANDLER(IndexedDBMsg_TransactionCallbacksAbort, OnAbort) IPC_MESSAGE_HANDLER(IndexedDBMsg_TransactionCallbacksComplete, OnComplete) IPC_MESSAGE_HANDLER(IndexedDBMsg_DatabaseCallbacksVersionChange, OnVersionChange) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() DCHECK(handled); }
C
Chrome
0
CVE-2016-4579
https://www.cvedetails.com/cve/CVE-2016-4579/
CWE-20
https://git.gnupg.org/cgi-bin/gitweb.cgi?p=libksba.git;a=commit;h=a7eed17a0b2a1c09ef986f3b4b323cd31cea2b64
a7eed17a0b2a1c09ef986f3b4b323cd31cea2b64
null
ksba_ocsp_parse_response (ksba_ocsp_t ocsp, const unsigned char *msg, size_t msglen, ksba_ocsp_response_status_t *response_status) { gpg_error_t err; struct ocsp_reqitem_s *ri; if (!ocsp || !msg || !msglen || !response_status) return gpg_error (GPG_ERR_INV_VALUE); if (!ocsp->requestlist) return gpg_error (GPG_ERR_MISSING_ACTION); /* Reset the fields used to track the response. This is so that we can use the parse function a second time for the same request. This is useful in case of a TryLater response status. */ ocsp->response_status = KSBA_OCSP_RSPSTATUS_NONE; release_ocsp_certlist (ocsp->received_certs); release_ocsp_extensions (ocsp->response_extensions); ocsp->received_certs = NULL; ocsp->hash_length = 0; ocsp->bad_nonce = 0; ocsp->good_nonce = 0; xfree (ocsp->responder_id.name); ocsp->responder_id.name = NULL; xfree (ocsp->responder_id.keyid); ocsp->responder_id.keyid = NULL; for (ri=ocsp->requestlist; ri; ri = ri->next) { ri->status = KSBA_STATUS_NONE; *ri->this_update = 0; *ri->next_update = 0; *ri->revocation_time = 0; ri->revocation_reason = 0; release_ocsp_extensions (ri->single_extensions); } /* Run the actual parser. */ err = parse_response (ocsp, msg, msglen); *response_status = ocsp->response_status; /* FIXME: find duplicates in the request list and set them to the same status. */ if (*response_status == KSBA_OCSP_RSPSTATUS_SUCCESS) if (ocsp->bad_nonce || (ocsp->noncelen && !ocsp->good_nonce)) *response_status = KSBA_OCSP_RSPSTATUS_REPLAYED; return err; }
ksba_ocsp_parse_response (ksba_ocsp_t ocsp, const unsigned char *msg, size_t msglen, ksba_ocsp_response_status_t *response_status) { gpg_error_t err; struct ocsp_reqitem_s *ri; if (!ocsp || !msg || !msglen || !response_status) return gpg_error (GPG_ERR_INV_VALUE); if (!ocsp->requestlist) return gpg_error (GPG_ERR_MISSING_ACTION); /* Reset the fields used to track the response. This is so that we can use the parse function a second time for the same request. This is useful in case of a TryLater response status. */ ocsp->response_status = KSBA_OCSP_RSPSTATUS_NONE; release_ocsp_certlist (ocsp->received_certs); release_ocsp_extensions (ocsp->response_extensions); ocsp->received_certs = NULL; ocsp->hash_length = 0; ocsp->bad_nonce = 0; ocsp->good_nonce = 0; xfree (ocsp->responder_id.name); ocsp->responder_id.name = NULL; xfree (ocsp->responder_id.keyid); ocsp->responder_id.keyid = NULL; for (ri=ocsp->requestlist; ri; ri = ri->next) { ri->status = KSBA_STATUS_NONE; *ri->this_update = 0; *ri->next_update = 0; *ri->revocation_time = 0; ri->revocation_reason = 0; release_ocsp_extensions (ri->single_extensions); } /* Run the actual parser. */ err = parse_response (ocsp, msg, msglen); *response_status = ocsp->response_status; /* FIXME: find duplicates in the request list and set them to the same status. */ if (*response_status == KSBA_OCSP_RSPSTATUS_SUCCESS) if (ocsp->bad_nonce || (ocsp->noncelen && !ocsp->good_nonce)) *response_status = KSBA_OCSP_RSPSTATUS_REPLAYED; return err; }
C
gnupg
0
CVE-2014-1710
https://www.cvedetails.com/cve/CVE-2014-1710/
CWE-119
https://github.com/chromium/chromium/commit/b71fc042e1124cda2ab51dfdacc2362da62779a6
b71fc042e1124cda2ab51dfdacc2362da62779a6
Add bounds validation to AsyncPixelTransfersCompletedQuery::End BUG=351852 R=jbauman@chromium.org, jorgelo@chromium.org Review URL: https://codereview.chromium.org/198253002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@256723 0039d316-1c4b-4281-b951-d872f2087c98
bool AsyncReadPixelsCompletedQuery::Process() { return true; }
bool AsyncReadPixelsCompletedQuery::Process() { return true; }
C
Chrome
0
CVE-2009-3605
https://www.cvedetails.com/cve/CVE-2009-3605/
CWE-189
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
null
JBIG2Bitmap::JBIG2Bitmap(Guint segNumA, int wA, int hA): JBIG2Segment(segNumA) { w = wA; h = hA; line = (wA + 7) >> 3; if (w <= 0 || h <= 0 || line <= 0 || h >= (INT_MAX - 1) / line) { error(-1, "invalid width/height"); data = NULL; return; } data = (Guchar *)gmallocn(h, line + 1); data[h * line] = 0; }
JBIG2Bitmap::JBIG2Bitmap(Guint segNumA, int wA, int hA): JBIG2Segment(segNumA) { w = wA; h = hA; line = (wA + 7) >> 3; if (w <= 0 || h <= 0 || line <= 0 || h >= (INT_MAX - 1) / line) { error(-1, "invalid width/height"); data = NULL; return; } data = (Guchar *)gmalloc(h * line + 1); data[h * line] = 0; }
CPP
poppler
1
CVE-2016-10150
https://www.cvedetails.com/cve/CVE-2016-10150/
CWE-416
https://github.com/torvalds/linux/commit/a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
KVM: use after free in kvm_ioctl_create_device() We should move the ops->destroy(dev) after the list_del(&dev->vm_node) so that we don't use "dev" after freeing it. Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock") Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: David Hildenbrand <david@redhat.com> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
static int kvm_debugfs_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *) inode->i_private; /* The debugfs files are a reference to the kvm struct which * is still valid when kvm_destroy_vm is called. * To avoid the race between open and the removal of the debugfs * directory we test against the users count. */ if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0)) return -ENOENT; if (simple_attr_open(inode, file, get, set, fmt)) { kvm_put_kvm(stat_data->kvm); return -ENOMEM; } return 0; }
static int kvm_debugfs_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct kvm_stat_data *stat_data = (struct kvm_stat_data *) inode->i_private; /* The debugfs files are a reference to the kvm struct which * is still valid when kvm_destroy_vm is called. * To avoid the race between open and the removal of the debugfs * directory we test against the users count. */ if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0)) return -ENOENT; if (simple_attr_open(inode, file, get, set, fmt)) { kvm_put_kvm(stat_data->kvm); return -ENOMEM; } return 0; }
C
linux
0
CVE-2015-5296
https://www.cvedetails.com/cve/CVE-2015-5296/
CWE-20
https://git.samba.org/?p=samba.git;a=commit;h=a819d2b440aafa3138d95ff6e8b824da885a70e9
a819d2b440aafa3138d95ff6e8b824da885a70e9
null
static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req, enum tevent_req_state req_state) { struct smbXcli_conn_samba_suicide_state *state = tevent_req_data( req, struct smbXcli_conn_samba_suicide_state); TALLOC_FREE(state->write_req); if (state->conn == NULL) { return; } if (state->conn->suicide_req == req) { state->conn->suicide_req = NULL; } state->conn = NULL; }
static void smbXcli_conn_samba_suicide_cleanup(struct tevent_req *req, enum tevent_req_state req_state) { struct smbXcli_conn_samba_suicide_state *state = tevent_req_data( req, struct smbXcli_conn_samba_suicide_state); TALLOC_FREE(state->write_req); if (state->conn == NULL) { return; } if (state->conn->suicide_req == req) { state->conn->suicide_req = NULL; } state->conn = NULL; }
C
samba
0
CVE-2017-11664
https://www.cvedetails.com/cve/CVE-2017-11664/
CWE-125
https://github.com/Mindwerks/wildmidi/commit/660b513d99bced8783a4a5984ac2f742c74ebbdd
660b513d99bced8783a4a5984ac2f742c74ebbdd
Add a new size parameter to _WM_SetupMidiEvent() so that it knows where to stop reading, and adjust its users properly. Fixes bug #175 (CVE-2017-11661, CVE-2017-11662, CVE-2017-11663, CVE-2017-11664.)
static int midi_setup_patch(struct _mdi *mdi, uint8_t channel, uint8_t patch) { MIDI_EVENT_DEBUG(__FUNCTION__,channel, patch); _WM_CheckEventMemoryPool(mdi); mdi->events[mdi->event_count].do_event = *_WM_do_patch; mdi->events[mdi->event_count].event_data.channel = channel; mdi->events[mdi->event_count].event_data.data.value = patch; mdi->events[mdi->event_count].samples_to_next = 0; mdi->event_count++; if (mdi->channel[channel].isdrum) { mdi->channel[channel].bank = patch; } else { _WM_load_patch(mdi, ((mdi->channel[channel].bank << 8) | patch)); mdi->channel[channel].patch = _WM_get_patch_data(mdi, ((mdi->channel[channel].bank << 8) | patch)); } return (0); }
static int midi_setup_patch(struct _mdi *mdi, uint8_t channel, uint8_t patch) { MIDI_EVENT_DEBUG(__FUNCTION__,channel, patch); _WM_CheckEventMemoryPool(mdi); mdi->events[mdi->event_count].do_event = *_WM_do_patch; mdi->events[mdi->event_count].event_data.channel = channel; mdi->events[mdi->event_count].event_data.data.value = patch; mdi->events[mdi->event_count].samples_to_next = 0; mdi->event_count++; if (mdi->channel[channel].isdrum) { mdi->channel[channel].bank = patch; } else { _WM_load_patch(mdi, ((mdi->channel[channel].bank << 8) | patch)); mdi->channel[channel].patch = _WM_get_patch_data(mdi, ((mdi->channel[channel].bank << 8) | patch)); } return (0); }
C
wildmidi
0
CVE-2017-11144
https://www.cvedetails.com/cve/CVE-2017-11144/
CWE-754
https://git.php.net/?p=php-src.git;a=commit;h=73cabfedf519298e1a11192699f44d53c529315e
73cabfedf519298e1a11192699f44d53c529315e
null
PHP_FUNCTION(openssl_open) { zval *privkey, *opendata; EVP_PKEY *pkey; int len1, len2, cipher_iv_len; unsigned char *buf, *iv_buf; zend_resource *keyresource = NULL; EVP_CIPHER_CTX *ctx; char * data; size_t data_len; char * ekey; size_t ekey_len; char *method = NULL, *iv = NULL; size_t method_len = 0, iv_len = 0; const EVP_CIPHER *cipher; if (zend_parse_parameters(ZEND_NUM_ARGS(), "sz/sz|ss", &data, &data_len, &opendata, &ekey, &ekey_len, &privkey, &method, &method_len, &iv, &iv_len) == FAILURE) { return; } pkey = php_openssl_evp_from_zval(privkey, 0, "", 0, 0, &keyresource); if (pkey == NULL) { php_error_docref(NULL, E_WARNING, "unable to coerce parameter 4 into a private key"); RETURN_FALSE; } PHP_OPENSSL_CHECK_SIZE_T_TO_INT(ekey_len, ekey); PHP_OPENSSL_CHECK_SIZE_T_TO_INT(data_len, data); if (method) { cipher = EVP_get_cipherbyname(method); if (!cipher) { php_error_docref(NULL, E_WARNING, "Unknown signature algorithm."); RETURN_FALSE; } } else { cipher = EVP_rc4(); } cipher_iv_len = EVP_CIPHER_iv_length(cipher); if (cipher_iv_len > 0) { if (!iv) { php_error_docref(NULL, E_WARNING, "Cipher algorithm requires an IV to be supplied as a sixth parameter"); RETURN_FALSE; } if (cipher_iv_len != iv_len) { php_error_docref(NULL, E_WARNING, "IV length is invalid"); RETURN_FALSE; } iv_buf = (unsigned char *)iv; } else { iv_buf = NULL; } buf = emalloc(data_len + 1); ctx = EVP_CIPHER_CTX_new(); if (ctx != NULL && EVP_OpenInit(ctx, cipher, (unsigned char *)ekey, (int)ekey_len, iv_buf, pkey) && EVP_OpenUpdate(ctx, buf, &len1, (unsigned char *)data, (int)data_len) && EVP_OpenFinal(ctx, buf + len1, &len2) && (len1 + len2 > 0)) { zval_dtor(opendata); buf[len1 + len2] = '\0'; ZVAL_NEW_STR(opendata, zend_string_init((char*)buf, len1 + len2, 0)); RETVAL_TRUE; } else { RETVAL_FALSE; } efree(buf); if (keyresource == NULL) { EVP_PKEY_free(pkey); } EVP_CIPHER_CTX_free(ctx); }
PHP_FUNCTION(openssl_open) { zval *privkey, *opendata; EVP_PKEY *pkey; int len1, len2, cipher_iv_len; unsigned char *buf, *iv_buf; zend_resource *keyresource = NULL; EVP_CIPHER_CTX *ctx; char * data; size_t data_len; char * ekey; size_t ekey_len; char *method = NULL, *iv = NULL; size_t method_len = 0, iv_len = 0; const EVP_CIPHER *cipher; if (zend_parse_parameters(ZEND_NUM_ARGS(), "sz/sz|ss", &data, &data_len, &opendata, &ekey, &ekey_len, &privkey, &method, &method_len, &iv, &iv_len) == FAILURE) { return; } pkey = php_openssl_evp_from_zval(privkey, 0, "", 0, 0, &keyresource); if (pkey == NULL) { php_error_docref(NULL, E_WARNING, "unable to coerce parameter 4 into a private key"); RETURN_FALSE; } PHP_OPENSSL_CHECK_SIZE_T_TO_INT(ekey_len, ekey); PHP_OPENSSL_CHECK_SIZE_T_TO_INT(data_len, data); if (method) { cipher = EVP_get_cipherbyname(method); if (!cipher) { php_error_docref(NULL, E_WARNING, "Unknown signature algorithm."); RETURN_FALSE; } } else { cipher = EVP_rc4(); } cipher_iv_len = EVP_CIPHER_iv_length(cipher); if (cipher_iv_len > 0) { if (!iv) { php_error_docref(NULL, E_WARNING, "Cipher algorithm requires an IV to be supplied as a sixth parameter"); RETURN_FALSE; } if (cipher_iv_len != iv_len) { php_error_docref(NULL, E_WARNING, "IV length is invalid"); RETURN_FALSE; } iv_buf = (unsigned char *)iv; } else { iv_buf = NULL; } buf = emalloc(data_len + 1); ctx = EVP_CIPHER_CTX_new(); if (ctx != NULL && EVP_OpenInit(ctx, cipher, (unsigned char *)ekey, (int)ekey_len, iv_buf, pkey) && EVP_OpenUpdate(ctx, buf, &len1, (unsigned char *)data, (int)data_len) && EVP_OpenFinal(ctx, buf + len1, &len2) && (len1 + len2 > 0)) { zval_dtor(opendata); buf[len1 + len2] = '\0'; ZVAL_NEW_STR(opendata, zend_string_init((char*)buf, len1 + len2, 0)); RETVAL_TRUE; } else { RETVAL_FALSE; } efree(buf); if (keyresource == NULL) { EVP_PKEY_free(pkey); } EVP_CIPHER_CTX_free(ctx); }
C
php
0
CVE-2016-6308
https://www.cvedetails.com/cve/CVE-2016-6308/
CWE-399
https://git.openssl.org/?p=openssl.git;a=commit;h=df6b5e29ffea2d5a3e08de92fb765fdb21c7a21e
df6b5e29ffea2d5a3e08de92fb765fdb21c7a21e
null
void dtls1_hm_fragment_free(hm_fragment *frag) { if (!frag) return; if (frag->msg_header.is_ccs) { EVP_CIPHER_CTX_free(frag->msg_header. saved_retransmit_state.enc_write_ctx); EVP_MD_CTX_free(frag->msg_header.saved_retransmit_state.write_hash); } OPENSSL_free(frag->fragment); OPENSSL_free(frag->reassembly); OPENSSL_free(frag); }
void dtls1_hm_fragment_free(hm_fragment *frag) { if (!frag) return; if (frag->msg_header.is_ccs) { EVP_CIPHER_CTX_free(frag->msg_header. saved_retransmit_state.enc_write_ctx); EVP_MD_CTX_free(frag->msg_header.saved_retransmit_state.write_hash); } OPENSSL_free(frag->fragment); OPENSSL_free(frag->reassembly); OPENSSL_free(frag); }
C
openssl
0
CVE-2018-6103
https://www.cvedetails.com/cve/CVE-2018-6103/
CWE-20
https://github.com/chromium/chromium/commit/12c876ae82355de6285bf0879023f1d1f1822ecf
12c876ae82355de6285bf0879023f1d1f1822ecf
Fix MediaObserver notifications in MediaStreamManager. This CL fixes the stream type used to notify MediaObserver about cancelled MediaStream requests. Before this CL, NUM_MEDIA_TYPES was used as stream type to indicate that all stream types should be cancelled. However, the MediaObserver end does not interpret NUM_MEDIA_TYPES this way and the request to update the UI is ignored. This CL sends a separate notification for each stream type so that the UI actually gets updated for all stream types in use. Bug: 816033 Change-Id: Ib7d3b3046d1dd0976627f8ab38abf086eacc9405 Reviewed-on: https://chromium-review.googlesource.com/939630 Commit-Queue: Guido Urdaneta <guidou@chromium.org> Reviewed-by: Raymes Khoury <raymes@chromium.org> Cr-Commit-Position: refs/heads/master@{#540122}
MockAudioManager() : AudioManagerPlatform(std::make_unique<media::TestAudioThread>(), &fake_audio_log_factory_), num_output_devices_(2), num_input_devices_(2) {}
MockAudioManager() : AudioManagerPlatform(std::make_unique<media::TestAudioThread>(), &fake_audio_log_factory_), num_output_devices_(2), num_input_devices_(2) {}
C
Chrome
0
CVE-2015-7513
https://www.cvedetails.com/cve/CVE-2015-7513/
null
https://github.com/torvalds/linux/commit/0185604c2d82c560dab2f2933a18f797e74ab5a8
0185604c2d82c560dab2f2933a18f797e74ab5a8
KVM: x86: Reload pit counters for all channels when restoring state Currently if userspace restores the pit counters with a count of 0 on channels 1 or 2 and the guest attempts to read the count on those channels, then KVM will perform a mod of 0 and crash. This will ensure that 0 values are converted to 65536 as per the spec. This is CVE-2015-7513. Signed-off-by: Andy Honig <ahonig@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); }
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); }
C
linux
0
CVE-2012-2816
https://www.cvedetails.com/cve/CVE-2012-2816/
null
https://github.com/chromium/chromium/commit/cd0bd79d6ebdb72183e6f0833673464cc10b3600
cd0bd79d6ebdb72183e6f0833673464cc10b3600
Convert plugin and GPU process to brokered handle duplication. BUG=119250 Review URL: https://chromiumcodereview.appspot.com/9958034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
void CommandBufferProxyImpl::SetContextLostReason( gpu::error::ContextLostReason reason) { NOTREACHED(); }
void CommandBufferProxyImpl::SetContextLostReason( gpu::error::ContextLostReason reason) { NOTREACHED(); }
C
Chrome
0
CVE-2018-16790
https://www.cvedetails.com/cve/CVE-2018-16790/
CWE-125
https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
Fix for CVE-2018-16790 -- Verify bounds before binary length read. As reported here: https://jira.mongodb.org/browse/CDRIVER-2819, a heap overread occurs due a failure to correctly verify data bounds. In the original check, len - o returns the data left including the sizeof(l) we just read. Instead, the comparison should check against the data left NOT including the binary int32, i.e. just subtype (byte*) instead of int32 subtype (byte*). Added in test for corrupted BSON example.
test_bson_append_overflow (void) { const char *key = "a"; uint32_t len; bson_t b; len = BSON_MAX_SIZE; len -= 4; /* len */ len -= 1; /* type */ len -= 1; /* value */ len -= 1; /* end byte */ bson_init (&b); BSON_ASSERT (!bson_append_bool (&b, key, len, true)); bson_destroy (&b); }
test_bson_append_overflow (void) { const char *key = "a"; uint32_t len; bson_t b; len = BSON_MAX_SIZE; len -= 4; /* len */ len -= 1; /* type */ len -= 1; /* value */ len -= 1; /* end byte */ bson_init (&b); BSON_ASSERT (!bson_append_bool (&b, key, len, true)); bson_destroy (&b); }
C
mongo-c-driver
0
CVE-2014-4503
https://www.cvedetails.com/cve/CVE-2014-4503/
CWE-20
https://github.com/sgminer-dev/sgminer/commit/910c36089940e81fb85c65b8e63dcd2fac71470c
910c36089940e81fb85c65b8e63dcd2fac71470c
stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime. Might have introduced a memory leak, don't have time to check. :( Should the other hex2bin()'s be checked? Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this.
static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->sgminer_pool_stats.times_sent++; pool->sgminer_pool_stats.bytes_sent += ssent; pool->sgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; }
static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->sgminer_pool_stats.times_sent++; pool->sgminer_pool_stats.bytes_sent += ssent; pool->sgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; }
C
sgminer
0
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
void BrowserEventRouter::ExtensionActionExecuted( Profile* profile, const ExtensionAction& extension_action, WebContents* web_contents) { const char* event_name = NULL; switch (extension_action.action_type()) { case Extension::ActionInfo::TYPE_BROWSER: event_name = "browserAction.onClicked"; break; case Extension::ActionInfo::TYPE_PAGE: event_name = "pageAction.onClicked"; break; case Extension::ActionInfo::TYPE_SCRIPT_BADGE: event_name = "scriptBadge.onClicked"; break; } if (event_name) { scoped_ptr<ListValue> args(new ListValue()); DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue( web_contents, ExtensionTabUtil::INCLUDE_PRIVACY_SENSITIVE_FIELDS); args->Append(tab_value); DispatchEventToExtension(profile, extension_action.extension_id(), event_name, args.Pass(), EventRouter::USER_GESTURE_ENABLED); } }
void BrowserEventRouter::ExtensionActionExecuted( Profile* profile, const ExtensionAction& extension_action, WebContents* web_contents) { const char* event_name = NULL; switch (extension_action.action_type()) { case Extension::ActionInfo::TYPE_BROWSER: event_name = "browserAction.onClicked"; break; case Extension::ActionInfo::TYPE_PAGE: event_name = "pageAction.onClicked"; break; case Extension::ActionInfo::TYPE_SCRIPT_BADGE: event_name = "scriptBadge.onClicked"; break; } if (event_name) { scoped_ptr<ListValue> args(new ListValue()); DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue( web_contents, ExtensionTabUtil::INCLUDE_PRIVACY_SENSITIVE_FIELDS); args->Append(tab_value); DispatchEventToExtension(profile, extension_action.extension_id(), event_name, args.Pass(), EventRouter::USER_GESTURE_ENABLED); } }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/c4363d1ca65494cb7b271625e1ff6541a9f593c9
c4363d1ca65494cb7b271625e1ff6541a9f593c9
ozone: evdev: Add a couple more trace events Add trace event inside each read notification for evdev. BUG=none TEST=chrome://tracing in link_freon Review URL: https://codereview.chromium.org/1110693003 Cr-Commit-Position: refs/heads/master@{#327110}
TouchEventConverterEvdev::TouchEventConverterEvdev( int fd, base::FilePath path, int id, InputDeviceType type, const EventDeviceInfo& devinfo, DeviceEventDispatcherEvdev* dispatcher) : EventConverterEvdev(fd, path, id, type, devinfo.name(), devinfo.vendor_id(), devinfo.product_id()), dispatcher_(dispatcher), syn_dropped_(false), has_mt_(false), touch_points_(0), next_tracking_id_(0), current_slot_(0) { if (base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kExtraTouchNoiseFiltering)) { touch_noise_finder_.reset(new TouchNoiseFinder); } }
TouchEventConverterEvdev::TouchEventConverterEvdev( int fd, base::FilePath path, int id, InputDeviceType type, const EventDeviceInfo& devinfo, DeviceEventDispatcherEvdev* dispatcher) : EventConverterEvdev(fd, path, id, type, devinfo.name(), devinfo.vendor_id(), devinfo.product_id()), dispatcher_(dispatcher), syn_dropped_(false), has_mt_(false), touch_points_(0), next_tracking_id_(0), current_slot_(0) { if (base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kExtraTouchNoiseFiltering)) { touch_noise_finder_.reset(new TouchNoiseFinder); } }
C
Chrome
0
CVE-2013-1790
https://www.cvedetails.com/cve/CVE-2013-1790/
CWE-119
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=b1026b5978c385328f2a15a2185c599a563edf91
b1026b5978c385328f2a15a2185c599a563edf91
null
short CCITTFaxStream::getWhiteCode() { short code; const CCITTCode *p; int n; code = 0; // make gcc happy if (endOfBlock) { code = lookBits(12); if (code == EOF) { return 1; } if ((code >> 5) == 0) { p = &whiteTab1[code]; } else { p = &whiteTab2[code >> 3]; } if (p->bits > 0) { eatBits(p->bits); return p->n; } } else { for (n = 1; n <= 9; ++n) { code = lookBits(n); if (code == EOF) { return 1; } if (n < 9) { code <<= 9 - n; } p = &whiteTab2[code]; if (p->bits == n) { eatBits(n); return p->n; } } for (n = 11; n <= 12; ++n) { code = lookBits(n); if (code == EOF) { return 1; } if (n < 12) { code <<= 12 - n; } p = &whiteTab1[code]; if (p->bits == n) { eatBits(n); return p->n; } } } error(errSyntaxError, getPos(), "Bad white code ({0:04x}) in CCITTFax stream", code); eatBits(1); return 1; }
short CCITTFaxStream::getWhiteCode() { short code; const CCITTCode *p; int n; code = 0; // make gcc happy if (endOfBlock) { code = lookBits(12); if (code == EOF) { return 1; } if ((code >> 5) == 0) { p = &whiteTab1[code]; } else { p = &whiteTab2[code >> 3]; } if (p->bits > 0) { eatBits(p->bits); return p->n; } } else { for (n = 1; n <= 9; ++n) { code = lookBits(n); if (code == EOF) { return 1; } if (n < 9) { code <<= 9 - n; } p = &whiteTab2[code]; if (p->bits == n) { eatBits(n); return p->n; } } for (n = 11; n <= 12; ++n) { code = lookBits(n); if (code == EOF) { return 1; } if (n < 12) { code <<= 12 - n; } p = &whiteTab1[code]; if (p->bits == n) { eatBits(n); return p->n; } } } error(errSyntaxError, getPos(), "Bad white code ({0:04x}) in CCITTFax stream", code); eatBits(1); return 1; }
CPP
poppler
0
CVE-2013-0904
https://www.cvedetails.com/cve/CVE-2013-0904/
CWE-119
https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a
b2b21468c1f7f08b30a7c1755316f6026c50eb2a
Separate repaint and layout requirements of StyleDifference (Step 1) Previously StyleDifference was an enum that proximately bigger values imply smaller values (e.g. StyleDifferenceLayout implies StyleDifferenceRepaint). This causes unnecessary repaints in some cases on layout change. Convert StyleDifference to a structure containing relatively independent flags. This change doesn't directly improve the result, but can make further repaint optimizations possible. Step 1 doesn't change any functionality. RenderStyle still generate the legacy StyleDifference enum when comparing styles and convert the result to the new StyleDifference. Implicit requirements are not handled during the conversion. Converted call sites to use the new StyleDifference according to the following conversion rules: - diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange() - diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly() - diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout() - diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout() - diff > StyleDifferenceRepaintLayer => diff.needsLayout() - diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly() - diff == StyleDifferenceLayout => diff.needsFullLayout() BUG=358460 TEST=All existing layout tests. R=eseidel@chromium.org, esprehn@chromium.org, jchaffraix@chromium.org Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983 Review URL: https://codereview.chromium.org/236203020 git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538
LayoutUnit RenderBlockFlow::adjustLogicalRightOffsetForLine(LayoutUnit offsetFromFloats, bool applyTextIndent) const { LayoutUnit right = offsetFromFloats; if (applyTextIndent && !style()->isLeftToRightDirection()) right -= textIndentOffset(); return right; }
LayoutUnit RenderBlockFlow::adjustLogicalRightOffsetForLine(LayoutUnit offsetFromFloats, bool applyTextIndent) const { LayoutUnit right = offsetFromFloats; if (applyTextIndent && !style()->isLeftToRightDirection()) right -= textIndentOffset(); return right; }
C
Chrome
0
CVE-2018-16075
https://www.cvedetails.com/cve/CVE-2018-16075/
CWE-254
https://github.com/chromium/chromium/commit/d913f72b4875cf0814fc3f03ad7c00642097c4a4
d913f72b4875cf0814fc3f03ad7c00642097c4a4
Remove RequireCSSExtensionForFile runtime enabled flag. The feature has long since been stable (since M64) and doesn't seem to be a need for this flag. BUG=788936 Change-Id: I666390b869289c328acb4a2daa5bf4154e1702c0 Reviewed-on: https://chromium-review.googlesource.com/c/1324143 Reviewed-by: Mike West <mkwst@chromium.org> Reviewed-by: Camille Lamy <clamy@chromium.org> Commit-Queue: Dave Tapuska <dtapuska@chromium.org> Cr-Commit-Position: refs/heads/master@{#607329}
void WebRuntimeFeatures::EnablePrintBrowser(bool enable) { RuntimeEnabledFeatures::SetPrintBrowserEnabled(enable); }
void WebRuntimeFeatures::EnablePrintBrowser(bool enable) { RuntimeEnabledFeatures::SetPrintBrowserEnabled(enable); }
C
Chrome
0
CVE-2016-9915
https://www.cvedetails.com/cve/CVE-2016-9915/
CWE-400
https://git.qemu.org/?p=qemu.git;a=commit;h=971f406b77a6eb84e0ad27dcc416b663765aee30
971f406b77a6eb84e0ad27dcc416b663765aee30
null
static ssize_t handle_preadv(FsContext *ctx, V9fsFidOpenState *fs, const struct iovec *iov, int iovcnt, off_t offset) { #ifdef CONFIG_PREADV return preadv(fs->fd, iov, iovcnt, offset); #else int err = lseek(fs->fd, offset, SEEK_SET); if (err == -1) { return err; } else { return readv(fs->fd, iov, iovcnt); } #endif }
static ssize_t handle_preadv(FsContext *ctx, V9fsFidOpenState *fs, const struct iovec *iov, int iovcnt, off_t offset) { #ifdef CONFIG_PREADV return preadv(fs->fd, iov, iovcnt, offset); #else int err = lseek(fs->fd, offset, SEEK_SET); if (err == -1) { return err; } else { return readv(fs->fd, iov, iovcnt); } #endif }
C
qemu
0
CVE-2017-11721
https://www.cvedetails.com/cve/CVE-2017-11721/
CWE-119
https://github.com/ioquake/ioq3/commit/d2b1d124d4055c2fcbe5126863487c52fd58cca1
d2b1d124d4055c2fcbe5126863487c52fd58cca1
Fix/improve buffer overflow in MSG_ReadBits/MSG_WriteBits Prevent reading past end of message in MSG_ReadBits. If read past end of msg->data buffer (16348 bytes) the engine could SEGFAULT. Make MSG_WriteBits use an exact buffer overflow check instead of possibly failing with a few bytes left.
void MSG_WriteDeltaEntity( msg_t *msg, struct entityState_s *from, struct entityState_s *to, qboolean force ) { int i, lc; int numFields; netField_t *field; int trunc; float fullFloat; int *fromF, *toF; numFields = ARRAY_LEN( entityStateFields ); assert( numFields + 1 == sizeof( *from )/4 ); if ( to == NULL ) { if ( from == NULL ) { return; } MSG_WriteBits( msg, from->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 1, 1 ); return; } if ( to->number < 0 || to->number >= MAX_GENTITIES ) { Com_Error (ERR_FATAL, "MSG_WriteDeltaEntity: Bad entity number: %i", to->number ); } lc = 0; for ( i = 0, field = entityStateFields ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF != *toF ) { lc = i+1; } } if ( lc == 0 ) { if ( !force ) { return; // nothing at all } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 0, 1 ); // no delta return; } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 1, 1 ); // we have a delta MSG_WriteByte( msg, lc ); // # of changes oldsize += numFields; for ( i = 0, field = entityStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF == *toF ) { MSG_WriteBits( msg, 0, 1 ); // no change continue; } MSG_WriteBits( msg, 1, 1 ); // changed if ( field->bits == 0 ) { fullFloat = *(float *)toF; trunc = (int)fullFloat; if (fullFloat == 0.0f) { MSG_WriteBits( msg, 0, 1 ); oldsize += FLOAT_INT_BITS; } else { MSG_WriteBits( msg, 1, 1 ); if ( trunc == fullFloat && trunc + FLOAT_INT_BIAS >= 0 && trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) { MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, trunc + FLOAT_INT_BIAS, FLOAT_INT_BITS ); } else { MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, 32 ); } } } else { if (*toF == 0) { MSG_WriteBits( msg, 0, 1 ); } else { MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, field->bits ); } } } }
void MSG_WriteDeltaEntity( msg_t *msg, struct entityState_s *from, struct entityState_s *to, qboolean force ) { int i, lc; int numFields; netField_t *field; int trunc; float fullFloat; int *fromF, *toF; numFields = ARRAY_LEN( entityStateFields ); assert( numFields + 1 == sizeof( *from )/4 ); if ( to == NULL ) { if ( from == NULL ) { return; } MSG_WriteBits( msg, from->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 1, 1 ); return; } if ( to->number < 0 || to->number >= MAX_GENTITIES ) { Com_Error (ERR_FATAL, "MSG_WriteDeltaEntity: Bad entity number: %i", to->number ); } lc = 0; for ( i = 0, field = entityStateFields ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF != *toF ) { lc = i+1; } } if ( lc == 0 ) { if ( !force ) { return; // nothing at all } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 0, 1 ); // no delta return; } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 1, 1 ); // we have a delta MSG_WriteByte( msg, lc ); // # of changes oldsize += numFields; for ( i = 0, field = entityStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF == *toF ) { MSG_WriteBits( msg, 0, 1 ); // no change continue; } MSG_WriteBits( msg, 1, 1 ); // changed if ( field->bits == 0 ) { fullFloat = *(float *)toF; trunc = (int)fullFloat; if (fullFloat == 0.0f) { MSG_WriteBits( msg, 0, 1 ); oldsize += FLOAT_INT_BITS; } else { MSG_WriteBits( msg, 1, 1 ); if ( trunc == fullFloat && trunc + FLOAT_INT_BIAS >= 0 && trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) { MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, trunc + FLOAT_INT_BIAS, FLOAT_INT_BITS ); } else { MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, 32 ); } } } else { if (*toF == 0) { MSG_WriteBits( msg, 0, 1 ); } else { MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, field->bits ); } } } }
C
ioq3
0
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <keescook@chromium.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); }
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); }
C
linux
0
CVE-2010-1149
https://www.cvedetails.com/cve/CVE-2010-1149/
CWE-200
https://cgit.freedesktop.org/udisks/commit/?id=0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
null
partition_create_data_ref (CreatePartitionData *data) { data->refcount++; return data; }
partition_create_data_ref (CreatePartitionData *data) { data->refcount++; return data; }
C
udisks
0
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
bool config_save(const config_t *config, const char *filename) { assert(config != NULL); assert(filename != NULL); assert(*filename != '\0'); // Steps to ensure content of config file gets to disk: // // 1) Open and write to temp file (e.g. bt_config.conf.new). // 2) Sync the temp file to disk with fsync(). // 3) Rename temp file to actual config file (e.g. bt_config.conf). // This ensures atomic update. // 4) Sync directory that has the conf file with fsync(). // This ensures directory entries are up-to-date. int dir_fd = -1; FILE *fp = NULL; // Build temp config file based on config file (e.g. bt_config.conf.new). static const char *temp_file_ext = ".new"; const int filename_len = strlen(filename); const int temp_filename_len = filename_len + strlen(temp_file_ext) + 1; char *temp_filename = osi_calloc(temp_filename_len); snprintf(temp_filename, temp_filename_len, "%s%s", filename, temp_file_ext); // Extract directory from file path (e.g. /data/misc/bluedroid). char *temp_dirname = osi_strdup(filename); const char *directoryname = dirname(temp_dirname); if (!directoryname) { LOG_ERROR("%s error extracting directory from '%s': %s", __func__, filename, strerror(errno)); goto error; } dir_fd = TEMP_FAILURE_RETRY(open(directoryname, O_RDONLY)); if (dir_fd < 0) { LOG_ERROR("%s unable to open dir '%s': %s", __func__, directoryname, strerror(errno)); goto error; } fp = fopen(temp_filename, "wt"); if (!fp) { LOG_ERROR("%s unable to write file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } for (const list_node_t *node = list_begin(config->sections); node != list_end(config->sections); node = list_next(node)) { const section_t *section = (const section_t *)list_node(node); if (fprintf(fp, "[%s]\n", section->name) < 0) { LOG_ERROR("%s unable to write to file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } for (const list_node_t *enode = list_begin(section->entries); enode != list_end(section->entries); enode = list_next(enode)) { const entry_t *entry = (const entry_t *)list_node(enode); if (fprintf(fp, "%s = %s\n", entry->key, entry->value) < 0) { LOG_ERROR("%s unable to write to file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } } if (list_next(node) != list_end(config->sections)) { if (fputc('\n', fp) == EOF) { LOG_ERROR("%s unable to write to file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } } } // Sync written temp file out to disk. fsync() is blocking until data makes it to disk. if (fsync(fileno(fp)) < 0) { LOG_WARN("%s unable to fsync file '%s': %s", __func__, temp_filename, strerror(errno)); } if (fclose(fp) == EOF) { LOG_ERROR("%s unable to close file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } fp = NULL; if (chmod(temp_filename, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) == -1) { LOG_ERROR("%s unable to change file permissions '%s': %s", __func__, filename, strerror(errno)); goto error; } // Rename written temp file to the actual config file. if (rename(temp_filename, filename) == -1) { LOG_ERROR("%s unable to commit file '%s': %s", __func__, filename, strerror(errno)); goto error; } // This should ensure the directory is updated as well. if (fsync(dir_fd) < 0) { LOG_WARN("%s unable to fsync dir '%s': %s", __func__, directoryname, strerror(errno)); } if (close(dir_fd) < 0) { LOG_ERROR("%s unable to close dir '%s': %s", __func__, directoryname, strerror(errno)); goto error; } osi_free(temp_filename); osi_free(temp_dirname); return true; error: // This indicates there is a write issue. Unlink as partial data is not acceptable. unlink(temp_filename); if (fp) fclose(fp); if (dir_fd != -1) close(dir_fd); osi_free(temp_filename); osi_free(temp_dirname); return false; }
bool config_save(const config_t *config, const char *filename) { assert(config != NULL); assert(filename != NULL); assert(*filename != '\0'); char *temp_filename = osi_calloc(strlen(filename) + 5); if (!temp_filename) { LOG_ERROR("%s unable to allocate memory for filename.", __func__); return false; } strcpy(temp_filename, filename); strcat(temp_filename, ".new"); FILE *fp = fopen(temp_filename, "wt"); if (!fp) { LOG_ERROR("%s unable to write file '%s': %s", __func__, temp_filename, strerror(errno)); goto error; } for (const list_node_t *node = list_begin(config->sections); node != list_end(config->sections); node = list_next(node)) { const section_t *section = (const section_t *)list_node(node); fprintf(fp, "[%s]\n", section->name); for (const list_node_t *enode = list_begin(section->entries); enode != list_end(section->entries); enode = list_next(enode)) { const entry_t *entry = (const entry_t *)list_node(enode); fprintf(fp, "%s = %s\n", entry->key, entry->value); } if (list_next(node) != list_end(config->sections)) fputc('\n', fp); } fflush(fp); fclose(fp); if (chmod(temp_filename, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) == -1) { LOG_ERROR("%s unable to change file permissions '%s': %s", __func__, filename, strerror(errno)); goto error; } if (rename(temp_filename, filename) == -1) { LOG_ERROR("%s unable to commit file '%s': %s", __func__, filename, strerror(errno)); goto error; } osi_free(temp_filename); return true; error:; unlink(temp_filename); osi_free(temp_filename); return false; }
C
Android
1
CVE-2017-9060
https://www.cvedetails.com/cve/CVE-2017-9060/
CWE-772
https://git.qemu.org/?p=qemu.git;a=commit;h=dd248ed7e204ee8a1873914e02b8b526e8f1b80d
dd248ed7e204ee8a1873914e02b8b526e8f1b80d
null
static void virtio_gpu_resource_flush(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_flush rf; pixman_region16_t flush_region; int i; VIRTIO_GPU_FILL_CMD(rf); trace_virtio_gpu_cmd_res_flush(rf.resource_id, rf.r.width, rf.r.height, rf.r.x, rf.r.y); res = virtio_gpu_find_resource(g, rf.resource_id); if (!res) { qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", __func__, rf.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } if (rf.r.x > res->width || rf.r.y > res->height || rf.r.width > res->width || rf.r.height > res->height || rf.r.x + rf.r.width > res->width || rf.r.y + rf.r.height > res->height) { qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" " bounds for resource %d: %d %d %d %d vs %d %d\n", __func__, rf.resource_id, rf.r.x, rf.r.y, rf.r.width, rf.r.height, res->width, res->height); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } pixman_region_init_rect(&flush_region, rf.r.x, rf.r.y, rf.r.width, rf.r.height); for (i = 0; i < g->conf.max_outputs; i++) { struct virtio_gpu_scanout *scanout; pixman_region16_t region, finalregion; pixman_box16_t *extents; if (!(res->scanout_bitmask & (1 << i))) { continue; } scanout = &g->scanout[i]; pixman_region_init(&finalregion); pixman_region_init_rect(&region, scanout->x, scanout->y, scanout->width, scanout->height); pixman_region_intersect(&finalregion, &flush_region, &region); pixman_region_translate(&finalregion, -scanout->x, -scanout->y); extents = pixman_region_extents(&finalregion); /* work out the area we need to update for each console */ dpy_gfx_update(g->scanout[i].con, extents->x1, extents->y1, extents->x2 - extents->x1, extents->y2 - extents->y1); pixman_region_fini(&region); pixman_region_fini(&finalregion); } pixman_region_fini(&flush_region); }
static void virtio_gpu_resource_flush(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_flush rf; pixman_region16_t flush_region; int i; VIRTIO_GPU_FILL_CMD(rf); trace_virtio_gpu_cmd_res_flush(rf.resource_id, rf.r.width, rf.r.height, rf.r.x, rf.r.y); res = virtio_gpu_find_resource(g, rf.resource_id); if (!res) { qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", __func__, rf.resource_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } if (rf.r.x > res->width || rf.r.y > res->height || rf.r.width > res->width || rf.r.height > res->height || rf.r.x + rf.r.width > res->width || rf.r.y + rf.r.height > res->height) { qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" " bounds for resource %d: %d %d %d %d vs %d %d\n", __func__, rf.resource_id, rf.r.x, rf.r.y, rf.r.width, rf.r.height, res->width, res->height); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } pixman_region_init_rect(&flush_region, rf.r.x, rf.r.y, rf.r.width, rf.r.height); for (i = 0; i < g->conf.max_outputs; i++) { struct virtio_gpu_scanout *scanout; pixman_region16_t region, finalregion; pixman_box16_t *extents; if (!(res->scanout_bitmask & (1 << i))) { continue; } scanout = &g->scanout[i]; pixman_region_init(&finalregion); pixman_region_init_rect(&region, scanout->x, scanout->y, scanout->width, scanout->height); pixman_region_intersect(&finalregion, &flush_region, &region); pixman_region_translate(&finalregion, -scanout->x, -scanout->y); extents = pixman_region_extents(&finalregion); /* work out the area we need to update for each console */ dpy_gfx_update(g->scanout[i].con, extents->x1, extents->y1, extents->x2 - extents->x1, extents->y2 - extents->y1); pixman_region_fini(&region); pixman_region_fini(&finalregion); } pixman_region_fini(&flush_region); }
C
qemu
0
null
null
null
https://github.com/chromium/chromium/commit/10c7ed8f076afd290fccf283d8bc416959722ca3
10c7ed8f076afd290fccf283d8bc416959722ca3
Fix bug 130606: Panels [WIN]: Alt-Tabbing to a minimized panel no longer restores it BUG=130606 TEST=Manual test by minimizing panel and alt-tabbing to it Review URL: https://chromiumcodereview.appspot.com/10509011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@140498 0039d316-1c4b-4281-b951-d872f2087c98
Browser* PanelBrowserView::GetPanelBrowser() const { return browser(); }
Browser* PanelBrowserView::GetPanelBrowser() const { return browser(); }
C
Chrome
0
CVE-2017-0555
https://www.cvedetails.com/cve/CVE-2017-0555/
CWE-200
https://android.googlesource.com/platform/external/libavc/+/0b23c81c3dd9ec38f7e6806a3955fed1925541a0
0b23c81c3dd9ec38f7e6806a3955fed1925541a0
Decoder: Fixed initialization of first_slice_in_pic To handle some errors, first_slice_in_pic was being set to 2. This is now cleaned up and first_slice_in_pic is set to 1 only once per pic. This will ensure picture level initializations are done only once even in case of error clips Bug: 33717589 Bug: 33551775 Bug: 33716442 Bug: 33677995 Change-Id: If341436b3cbaa724017eedddd88c2e6fac36d8ba
WORD32 ih264d_parse_decode_slice(UWORD8 u1_is_idr_slice, UWORD8 u1_nal_ref_idc, dec_struct_t *ps_dec /* Decoder parameters */ ) { dec_bit_stream_t * ps_bitstrm = ps_dec->ps_bitstrm; dec_pic_params_t *ps_pps; dec_seq_params_t *ps_seq; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; pocstruct_t s_tmp_poc; WORD32 i_delta_poc[2]; WORD32 i4_poc = 0; UWORD16 u2_first_mb_in_slice, u2_frame_num; UWORD8 u1_field_pic_flag, u1_redundant_pic_cnt = 0, u1_slice_type; UWORD32 u4_idr_pic_id = 0; UWORD8 u1_bottom_field_flag, u1_pic_order_cnt_type; UWORD8 u1_nal_unit_type; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; WORD8 i1_is_end_of_poc; WORD32 ret, end_of_frame; WORD32 prev_slice_err, num_mb_skipped; UWORD8 u1_mbaff; pocstruct_t *ps_cur_poc; UWORD32 u4_temp; WORD32 i_temp; UWORD32 u4_call_end_of_pic = 0; /* read FirstMbInSlice and slice type*/ ps_dec->ps_dpb_cmds->u1_dpb_commands_read_slc = 0; u2_first_mb_in_slice = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u2_first_mb_in_slice > (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)) { return ERROR_CORRUPTED_SLICE; } /*we currently don not support ASO*/ if(((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) <= ps_dec->u2_cur_mb_addr) && (ps_dec->u4_first_slice_in_pic == 0)) { return ERROR_CORRUPTED_SLICE; } COPYTHECONTEXT("SH: first_mb_in_slice",u2_first_mb_in_slice); u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > 9) return ERROR_INV_SLC_TYPE_T; u1_slice_type = u4_temp; COPYTHECONTEXT("SH: slice_type",(u1_slice_type)); ps_dec->u1_sl_typ_5_9 = 0; /* Find Out the Slice Type is 5 to 9 or not then Set the Flag */ /* u1_sl_typ_5_9 = 1 .Which tells that all the slices in the Pic*/ /* will be of same type of current */ if(u1_slice_type > 4) { u1_slice_type -= 5; ps_dec->u1_sl_typ_5_9 = 1; } { UWORD32 skip; if((ps_dec->i4_app_skip_mode == IVD_SKIP_PB) || (ps_dec->i4_dec_skip_mode == IVD_SKIP_PB)) { UWORD32 u4_bit_stream_offset = 0; if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else if((I_SLICE == u1_slice_type) && (1 >= ps_dec->ps_cur_sps->u1_num_ref_frames)) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else { skip = 1; } /* If one frame worth of data is already skipped, do not skip the next one */ if((0 == u2_first_mb_in_slice) && (1 == ps_dec->u4_prev_nal_skipped)) { skip = 0; } if(skip) { ps_dec->u4_prev_nal_skipped = 1; ps_dec->i4_dec_skip_mode = IVD_SKIP_PB; return 0; } else { /* If the previous NAL was skipped, then do not process that buffer in this call. Return to app and process it in the next call. This is necessary to handle cases where I/IDR is not complete in the current buffer and application intends to fill the remaining part of the bitstream later. 
This ensures we process only frame worth of data in every call */ if(1 == ps_dec->u4_prev_nal_skipped) { ps_dec->u4_return_to_app = 1; return 0; } } } } u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp & MASK_ERR_PIC_SET_ID) return ERROR_INV_SLICE_HDR_T; /* discard slice if pic param is invalid */ COPYTHECONTEXT("SH: pic_parameter_set_id", u4_temp); ps_pps = &ps_dec->ps_pps[u4_temp]; if(FALSE == ps_pps->u1_is_valid) { return ERROR_INV_SLICE_HDR_T; } ps_seq = ps_pps->ps_sps; if(!ps_seq) return ERROR_INV_SLICE_HDR_T; if(FALSE == ps_seq->u1_is_valid) return ERROR_INV_SLICE_HDR_T; /* Get the frame num */ u2_frame_num = ih264d_get_bits_h264(ps_bitstrm, ps_seq->u1_bits_in_frm_num); COPYTHECONTEXT("SH: frame_num", u2_frame_num); if(!ps_dec->u1_first_slice_in_stream && ps_dec->u4_first_slice_in_pic) { pocstruct_t *ps_prev_poc = &ps_dec->s_prev_pic_poc; pocstruct_t *ps_cur_poc = &ps_dec->s_cur_pic_poc; ps_dec->u2_mbx = 0xffff; ps_dec->u2_mby = 0; if((0 == u1_is_idr_slice) && ps_cur_slice->u1_nal_ref_idc) ps_dec->u2_prev_ref_frame_num = ps_cur_slice->u2_frame_num; if(u1_is_idr_slice || ps_cur_slice->u1_mmco_equalto5) ps_dec->u2_prev_ref_frame_num = 0; if(ps_dec->ps_cur_sps->u1_gaps_in_frame_num_value_allowed_flag) { ih264d_decode_gaps_in_frame_num(ps_dec, u2_frame_num); } ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = ps_cur_poc->u2_frame_num; ps_prev_poc->u1_mmco_equalto5 = ps_cur_slice->u1_mmco_equalto5; if(ps_cur_slice->u1_nal_ref_idc) { ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_cur_poc->u1_bot_field; } ps_dec->u2_total_mbs_coded = 0; } /* Get the field related flags */ if(!ps_seq->u1_frame_mbs_only_flag) { u1_field_pic_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: field_pic_flag", u1_field_pic_flag); u1_bottom_field_flag = 0; if(u1_field_pic_flag) { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan_fld; u1_bottom_field_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: bottom_field_flag", u1_bottom_field_flag); } else { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } } else { u1_field_pic_flag = 0; u1_bottom_field_flag = 0; ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } u1_nal_unit_type = SLICE_NAL; if(u1_is_idr_slice) { if(0 == u1_field_pic_flag) { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } u1_nal_unit_type = IDR_SLICE_NAL; u4_idr_pic_id = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_idr_pic_id > 65535) return ERROR_INV_SLICE_HDR_T; COPYTHECONTEXT("SH: ", u4_idr_pic_id); } /* read delta pic order count information*/ i_delta_poc[0] = i_delta_poc[1] = 0; s_tmp_poc.i4_pic_order_cnt_lsb = 0; s_tmp_poc.i4_delta_pic_order_cnt_bottom = 0; u1_pic_order_cnt_type = ps_seq->u1_pic_order_cnt_type; if(u1_pic_order_cnt_type == 0) { i_temp = ih264d_get_bits_h264( ps_bitstrm, ps_seq->u1_log2_max_pic_order_cnt_lsb_minus); if(i_temp < 0 || i_temp >= ps_seq->i4_max_pic_order_cntLsb) return ERROR_INV_SLICE_HDR_T; s_tmp_poc.i4_pic_order_cnt_lsb = i_temp; COPYTHECONTEXT("SH: pic_order_cnt_lsb", s_tmp_poc.i4_pic_order_cnt_lsb); if((ps_pps->u1_pic_order_present_flag == 1) && (!u1_field_pic_flag)) { 
s_tmp_poc.i4_delta_pic_order_cnt_bottom = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt_bottom", s_tmp_poc.i4_delta_pic_order_cnt_bottom); } } s_tmp_poc.i4_delta_pic_order_cnt[0] = 0; s_tmp_poc.i4_delta_pic_order_cnt[1] = 0; if(u1_pic_order_cnt_type == 1 && (!ps_seq->u1_delta_pic_order_always_zero_flag)) { s_tmp_poc.i4_delta_pic_order_cnt[0] = ih264d_sev(pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[0]", s_tmp_poc.i4_delta_pic_order_cnt[0]); if(ps_pps->u1_pic_order_present_flag && !u1_field_pic_flag) { s_tmp_poc.i4_delta_pic_order_cnt[1] = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[1]", s_tmp_poc.i4_delta_pic_order_cnt[1]); } } if(ps_pps->u1_redundant_pic_cnt_present_flag) { u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > MAX_REDUNDANT_PIC_CNT) return ERROR_INV_SLICE_HDR_T; u1_redundant_pic_cnt = u4_temp; COPYTHECONTEXT("SH: redundant_pic_cnt", u1_redundant_pic_cnt); } /*--------------------------------------------------------------------*/ /* Check if the slice is part of new picture */ /*--------------------------------------------------------------------*/ /* First slice of a picture is always considered as part of new picture */ i1_is_end_of_poc = 1; ps_dec->ps_dec_err_status->u1_err_flag &= MASK_REJECT_CUR_PIC; if(ps_dec->u4_first_slice_in_pic == 0) { i1_is_end_of_poc = ih264d_is_end_of_pic(u2_frame_num, u1_nal_ref_idc, &s_tmp_poc, &ps_dec->s_cur_pic_poc, ps_cur_slice, u1_pic_order_cnt_type, u1_nal_unit_type, u4_idr_pic_id, u1_field_pic_flag, u1_bottom_field_flag); if(i1_is_end_of_poc) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } } /*--------------------------------------------------------------------*/ /* Check for error in slice and parse the missing/corrupted MB's */ /* as skip-MB's in an inserted P-slice */ /*--------------------------------------------------------------------*/ u1_mbaff = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); prev_slice_err = 0; if(i1_is_end_of_poc || ps_dec->u1_first_slice_in_stream) { if(u2_frame_num != ps_dec->u2_prv_frame_num && ps_dec->u1_top_bottom_decoded != 0 && ps_dec->u1_top_bottom_decoded != (TOP_FIELD_ONLY | BOT_FIELD_ONLY)) { ps_dec->u1_dangling_field = 1; if(ps_dec->u4_first_slice_in_pic) { prev_slice_err = 1; } else { prev_slice_err = 2; } if(ps_dec->u1_top_bottom_decoded ==TOP_FIELD_ONLY) ps_cur_slice->u1_bottom_field_flag = 1; else ps_cur_slice->u1_bottom_field_flag = 0; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &ps_dec->s_cur_pic_poc; u1_is_idr_slice = ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL; } else if(ps_dec->u4_first_slice_in_pic) { if(u2_first_mb_in_slice > 0) { prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; ps_cur_poc = &s_tmp_poc; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); } } else { /* since i1_is_end_of_poc is set ,means new frame num is encountered. 
so conceal the current frame * completely */ prev_slice_err = 2; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } } else { if((u2_first_mb_in_slice << u1_mbaff) > ps_dec->u2_total_mbs_coded) { prev_slice_err = 2; num_mb_skipped = (u2_first_mb_in_slice << u1_mbaff) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } else if((u2_first_mb_in_slice << u1_mbaff) < ps_dec->u2_total_mbs_coded) { return ERROR_CORRUPTED_SLICE; } } if(prev_slice_err) { ret = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, u1_is_idr_slice, u2_frame_num, ps_cur_poc, prev_slice_err); if(ps_dec->u1_dangling_field == 1) { ps_dec->u1_second_field = 1 - ps_dec->u1_second_field; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_dec->u2_prv_frame_num = u2_frame_num; ps_dec->u1_first_slice_in_stream = 0; return ERROR_DANGLING_FIELD_IN_PIC; } if(prev_slice_err == 2) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { /* return if all MBs in frame are parsed*/ ps_dec->u1_first_slice_in_stream = 0; return ERROR_IN_LAST_SLICE_OF_PIC; } if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return ERROR_NEW_FRAME_EXPECTED; } if(ret != OK) return ret; i1_is_end_of_poc = 0; } if (ps_dec->u4_first_slice_in_pic == 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } if((ps_dec->u1_separate_parse == 0) && (ps_dec->u4_first_slice_in_pic == 0)) { ps_dec->ps_decode_cur_slice++; } ps_dec->u1_slice_header_done = 0; if(u1_field_pic_flag) { ps_dec->u2_prv_frame_num = u2_frame_num; } if(ps_cur_slice->u1_mmco_equalto5) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; if(!ps_cur_slice->u1_field_pic_flag) // or a complementary field pair { i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); } else if(!ps_cur_slice->u1_bottom_field_flag) i4_temp_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; else i4_temp_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_top_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_poc = i4_temp_poc; ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } if(ps_dec->u4_first_slice_in_pic) { ret = ih264d_decode_pic_order_cnt(u1_is_idr_slice, u2_frame_num, &ps_dec->s_prev_pic_poc, &s_tmp_poc, ps_cur_slice, ps_pps, u1_nal_ref_idc, u1_bottom_field_flag, u1_field_pic_flag, &i4_poc); if(ret != OK) return ret; /* Display seq no calculations */ if(i4_poc >= ps_dec->i4_max_poc) ps_dec->i4_max_poc = i4_poc; /* IDR Picture or POC wrap around */ if(i4_poc == 0) { ps_dec->i4_prev_max_display_seq = ps_dec->i4_prev_max_display_seq + ps_dec->i4_max_poc + ps_dec->u1_max_dec_frame_buffering + 1; ps_dec->i4_max_poc = 0; } } /*--------------------------------------------------------------------*/ /* Copy the values read from the bitstream to the slice header and then*/ /* If the slice is first slice in picture, then do Start of Picture */ /* processing. 
*/ /*--------------------------------------------------------------------*/ ps_cur_slice->i4_delta_pic_order_cnt[0] = i_delta_poc[0]; ps_cur_slice->i4_delta_pic_order_cnt[1] = i_delta_poc[1]; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u2_first_mb_in_slice = u2_first_mb_in_slice; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->u1_slice_type = u1_slice_type; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; if(ps_seq->u1_frame_mbs_only_flag) ps_cur_slice->u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; else ps_cur_slice->u1_direct_8x8_inference_flag = 1; if(u1_slice_type == B_SLICE) { ps_cur_slice->u1_direct_spatial_mv_pred_flag = ih264d_get_bit_h264( ps_bitstrm); COPYTHECONTEXT("SH: direct_spatial_mv_pred_flag", ps_cur_slice->u1_direct_spatial_mv_pred_flag); if(ps_cur_slice->u1_direct_spatial_mv_pred_flag) ps_cur_slice->pf_decodeDirect = ih264d_decode_spatial_direct; else ps_cur_slice->pf_decodeDirect = ih264d_decode_temporal_direct; if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaffB; } else { if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; } if(ps_dec->u4_first_slice_in_pic) { if(u2_first_mb_in_slice == 0) { ret = ih264d_start_of_pic(ps_dec, i4_poc, &s_tmp_poc, u2_frame_num, ps_pps); if(ret != OK) return ret; } ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } /* INITIALIZATION of fn ptrs for MC and formMbPartInfo functions */ { UWORD8 uc_nofield_nombaff; uc_nofield_nombaff = ((ps_dec->ps_cur_slice->u1_field_pic_flag == 0) && (ps_dec->ps_cur_slice->u1_mbaff_frame_flag == 0) && (u1_slice_type != B_SLICE) && (ps_dec->ps_cur_pps->u1_wted_pred_flag == 0)); /* Initialise MC and formMbPartInfo fn ptrs one time based on profile_idc */ if(uc_nofield_nombaff) { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; } else { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_mp; ps_dec->p_motion_compensate = ih264d_motion_compensate_mp; } } /* * Decide whether to decode the current picture or not */ { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_err->u4_frm_sei_sync == u2_frame_num) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; ps_err->u4_frm_sei_sync = SYNC_FRM_DEFAULT; } 
ps_err->u4_cur_frm = u2_frame_num; } /* Decision for decoding if the picture is to be skipped */ { WORD32 i4_skip_b_pic, i4_skip_p_pic; i4_skip_b_pic = (ps_dec->u4_skip_frm_mask & B_SLC_BIT) && (B_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); i4_skip_p_pic = (ps_dec->u4_skip_frm_mask & P_SLC_BIT) && (P_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); /**************************************************************/ /* Skip the B picture if skip mask is set for B picture and */ /* Current B picture is a non reference B picture or there is */ /* no user for reference B picture */ /**************************************************************/ if(i4_skip_b_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; /* Don't decode the picture in SKIP-B mode if that picture is B */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } /**************************************************************/ /* Skip the P picture if skip mask is set for P picture and */ /* Current P picture is a non reference P picture or there is */ /* no user for reference P picture */ /**************************************************************/ if(i4_skip_p_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; /* Don't decode the picture in SKIP-P mode if that picture is P */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } } { UWORD16 u2_mb_x, u2_mb_y; ps_dec->i4_submb_ofst = ((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) * SUB_BLK_SIZE) - SUB_BLK_SIZE; if(u2_first_mb_in_slice) { UWORD8 u1_mb_aff; UWORD8 u1_field_pic; UWORD16 u2_frm_wd_in_mbs; u2_frm_wd_in_mbs = ps_seq->u2_frm_wd_in_mbs; u1_mb_aff = ps_cur_slice->u1_mbaff_frame_flag; u1_field_pic = ps_cur_slice->u1_field_pic_flag; { UWORD32 x_offset; UWORD32 y_offset; UWORD32 u4_frame_stride; tfr_ctxt_t *ps_trns_addr; // = &ps_dec->s_tran_addrecon_parse; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = MOD(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y = DIV(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y <<= u1_mb_aff; if((u2_mb_x > u2_frm_wd_in_mbs - 1) || (u2_mb_y > ps_dec->u2_frm_ht_in_mbs - 1)) { return ERROR_CORRUPTED_SLICE; } u4_frame_stride = ps_dec->u2_frm_wd_y << u1_field_pic; x_offset = u2_mb_x << 4; y_offset = (u2_mb_y * u4_frame_stride) << 4; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1 + x_offset + y_offset; u4_frame_stride = ps_dec->u2_frm_wd_uv << u1_field_pic; x_offset >>= 1; y_offset = (u2_mb_y * u4_frame_stride) << 3; x_offset *= YUV420SP_FACTOR; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2 + x_offset + y_offset; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3 + x_offset + y_offset; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; if(ps_dec->u1_separate_parse == 1) { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } else { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } ps_dec->u2_cur_mb_addr = (u2_first_mb_in_slice << u1_mb_aff); ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv + ((u2_first_mb_in_slice << u1_mb_aff) << 4); } } else { tfr_ctxt_t *ps_trns_addr; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = 0xffff; u2_mb_y = 0; 
ps_dec->u2_cur_mb_addr = 0; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; } ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= ps_cur_slice->u1_mbaff_frame_flag; ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; } /* RBSP stop bit is used for CABAC decoding*/ ps_bitstrm->u4_max_ofst += ps_dec->ps_cur_pps->u1_entropy_coding_mode; ps_dec->u1_B = (u1_slice_type == B_SLICE); ps_dec->u4_next_mb_skip = 0; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->ps_cur_slice->u2_first_mb_in_slice; ps_dec->ps_parse_cur_slice->slice_type = ps_dec->ps_cur_slice->u1_slice_type; ps_dec->u4_start_recon_deblk = 1; { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = ( void *)pu1_buf; } if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } if(u1_slice_type == I_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= I_SLC_BIT; ret = ih264d_parse_islice(ps_dec, u2_first_mb_in_slice); if(ps_dec->i4_pic_type != B_SLICE && ps_dec->i4_pic_type != P_SLICE) ps_dec->i4_pic_type = I_SLICE; } else if(u1_slice_type == P_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; ret = ih264d_parse_pslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; if(ps_dec->i4_pic_type != B_SLICE) ps_dec->i4_pic_type = P_SLICE; } else if(u1_slice_type == B_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; ret = ih264d_parse_bslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; ps_dec->i4_pic_type = B_SLICE; } else return ERROR_INV_SLC_TYPE_T; if(ps_dec->u1_slice_header_done) { /* set to zero to indicate a valid slice has been decoded */ ps_dec->u1_first_slice_in_stream = 0; } if(ret != OK) return ret; /* storing last Mb X and MbY of the slice */ ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; /* End of Picture detection */ if(ps_dec->u2_total_mbs_coded >= (ps_seq->u2_max_mb_addr + 1)) { ps_dec->u1_pic_decode_done = 1; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if((ps_err->u1_err_flag & REJECT_PB_PICS) && (ps_err->u1_cur_pic_type == PIC_TYPE_I)) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } PRINT_BIN_BIT_RATIO(ps_dec) return ret; }
WORD32 ih264d_parse_decode_slice(UWORD8 u1_is_idr_slice, UWORD8 u1_nal_ref_idc, dec_struct_t *ps_dec /* Decoder parameters */ ) { dec_bit_stream_t * ps_bitstrm = ps_dec->ps_bitstrm; dec_pic_params_t *ps_pps; dec_seq_params_t *ps_seq; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; pocstruct_t s_tmp_poc; WORD32 i_delta_poc[2]; WORD32 i4_poc = 0; UWORD16 u2_first_mb_in_slice, u2_frame_num; UWORD8 u1_field_pic_flag, u1_redundant_pic_cnt = 0, u1_slice_type; UWORD32 u4_idr_pic_id = 0; UWORD8 u1_bottom_field_flag, u1_pic_order_cnt_type; UWORD8 u1_nal_unit_type; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; WORD8 i1_is_end_of_poc; WORD32 ret, end_of_frame; WORD32 prev_slice_err, num_mb_skipped; UWORD8 u1_mbaff; pocstruct_t *ps_cur_poc; UWORD32 u4_temp; WORD32 i_temp; UWORD32 u4_call_end_of_pic = 0; /* read FirstMbInSlice and slice type*/ ps_dec->ps_dpb_cmds->u1_dpb_commands_read_slc = 0; u2_first_mb_in_slice = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u2_first_mb_in_slice > (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)) { return ERROR_CORRUPTED_SLICE; } /*we currently don not support ASO*/ if(((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) <= ps_dec->u2_cur_mb_addr) && (ps_dec->u4_first_slice_in_pic == 0)) { return ERROR_CORRUPTED_SLICE; } COPYTHECONTEXT("SH: first_mb_in_slice",u2_first_mb_in_slice); u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > 9) return ERROR_INV_SLC_TYPE_T; u1_slice_type = u4_temp; COPYTHECONTEXT("SH: slice_type",(u1_slice_type)); ps_dec->u1_sl_typ_5_9 = 0; /* Find Out the Slice Type is 5 to 9 or not then Set the Flag */ /* u1_sl_typ_5_9 = 1 .Which tells that all the slices in the Pic*/ /* will be of same type of current */ if(u1_slice_type > 4) { u1_slice_type -= 5; ps_dec->u1_sl_typ_5_9 = 1; } { UWORD32 skip; if((ps_dec->i4_app_skip_mode == IVD_SKIP_PB) || (ps_dec->i4_dec_skip_mode == IVD_SKIP_PB)) { UWORD32 u4_bit_stream_offset = 0; if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else if((I_SLICE == u1_slice_type) && (1 >= ps_dec->ps_cur_sps->u1_num_ref_frames)) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else { skip = 1; } /* If one frame worth of data is already skipped, do not skip the next one */ if((0 == u2_first_mb_in_slice) && (1 == ps_dec->u4_prev_nal_skipped)) { skip = 0; } if(skip) { ps_dec->u4_prev_nal_skipped = 1; ps_dec->i4_dec_skip_mode = IVD_SKIP_PB; return 0; } else { /* If the previous NAL was skipped, then do not process that buffer in this call. Return to app and process it in the next call. This is necessary to handle cases where I/IDR is not complete in the current buffer and application intends to fill the remaining part of the bitstream later. 
This ensures we process only frame worth of data in every call */ if(1 == ps_dec->u4_prev_nal_skipped) { ps_dec->u4_return_to_app = 1; return 0; } } } } u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp & MASK_ERR_PIC_SET_ID) return ERROR_INV_SLICE_HDR_T; /* discard slice if pic param is invalid */ COPYTHECONTEXT("SH: pic_parameter_set_id", u4_temp); ps_pps = &ps_dec->ps_pps[u4_temp]; if(FALSE == ps_pps->u1_is_valid) { return ERROR_INV_SLICE_HDR_T; } ps_seq = ps_pps->ps_sps; if(!ps_seq) return ERROR_INV_SLICE_HDR_T; if(FALSE == ps_seq->u1_is_valid) return ERROR_INV_SLICE_HDR_T; /* Get the frame num */ u2_frame_num = ih264d_get_bits_h264(ps_bitstrm, ps_seq->u1_bits_in_frm_num); COPYTHECONTEXT("SH: frame_num", u2_frame_num); if(!ps_dec->u1_first_slice_in_stream && (ps_dec->u4_first_slice_in_pic == 2)) { pocstruct_t *ps_prev_poc = &ps_dec->s_prev_pic_poc; pocstruct_t *ps_cur_poc = &ps_dec->s_cur_pic_poc; ps_dec->u2_mbx = 0xffff; ps_dec->u2_mby = 0; if((0 == u1_is_idr_slice) && ps_cur_slice->u1_nal_ref_idc) ps_dec->u2_prev_ref_frame_num = ps_cur_slice->u2_frame_num; if(u1_is_idr_slice || ps_cur_slice->u1_mmco_equalto5) ps_dec->u2_prev_ref_frame_num = 0; if(ps_dec->ps_cur_sps->u1_gaps_in_frame_num_value_allowed_flag) { ih264d_decode_gaps_in_frame_num(ps_dec, u2_frame_num); } ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = ps_cur_poc->u2_frame_num; ps_prev_poc->u1_mmco_equalto5 = ps_cur_slice->u1_mmco_equalto5; if(ps_cur_slice->u1_nal_ref_idc) { ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_cur_poc->u1_bot_field; } ps_dec->u2_total_mbs_coded = 0; } /* Get the field related flags */ if(!ps_seq->u1_frame_mbs_only_flag) { u1_field_pic_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: field_pic_flag", u1_field_pic_flag); u1_bottom_field_flag = 0; if(u1_field_pic_flag) { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan_fld; u1_bottom_field_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: bottom_field_flag", u1_bottom_field_flag); } else { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } } else { u1_field_pic_flag = 0; u1_bottom_field_flag = 0; ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } u1_nal_unit_type = SLICE_NAL; if(u1_is_idr_slice) { if(0 == u1_field_pic_flag) { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } u1_nal_unit_type = IDR_SLICE_NAL; u4_idr_pic_id = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_idr_pic_id > 65535) return ERROR_INV_SLICE_HDR_T; COPYTHECONTEXT("SH: ", u4_idr_pic_id); } /* read delta pic order count information*/ i_delta_poc[0] = i_delta_poc[1] = 0; s_tmp_poc.i4_pic_order_cnt_lsb = 0; s_tmp_poc.i4_delta_pic_order_cnt_bottom = 0; u1_pic_order_cnt_type = ps_seq->u1_pic_order_cnt_type; if(u1_pic_order_cnt_type == 0) { i_temp = ih264d_get_bits_h264( ps_bitstrm, ps_seq->u1_log2_max_pic_order_cnt_lsb_minus); if(i_temp < 0 || i_temp >= ps_seq->i4_max_pic_order_cntLsb) return ERROR_INV_SLICE_HDR_T; s_tmp_poc.i4_pic_order_cnt_lsb = i_temp; COPYTHECONTEXT("SH: pic_order_cnt_lsb", s_tmp_poc.i4_pic_order_cnt_lsb); if((ps_pps->u1_pic_order_present_flag == 1) && 
(!u1_field_pic_flag)) { s_tmp_poc.i4_delta_pic_order_cnt_bottom = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt_bottom", s_tmp_poc.i4_delta_pic_order_cnt_bottom); } } s_tmp_poc.i4_delta_pic_order_cnt[0] = 0; s_tmp_poc.i4_delta_pic_order_cnt[1] = 0; if(u1_pic_order_cnt_type == 1 && (!ps_seq->u1_delta_pic_order_always_zero_flag)) { s_tmp_poc.i4_delta_pic_order_cnt[0] = ih264d_sev(pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[0]", s_tmp_poc.i4_delta_pic_order_cnt[0]); if(ps_pps->u1_pic_order_present_flag && !u1_field_pic_flag) { s_tmp_poc.i4_delta_pic_order_cnt[1] = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[1]", s_tmp_poc.i4_delta_pic_order_cnt[1]); } } if(ps_pps->u1_redundant_pic_cnt_present_flag) { u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > MAX_REDUNDANT_PIC_CNT) return ERROR_INV_SLICE_HDR_T; u1_redundant_pic_cnt = u4_temp; COPYTHECONTEXT("SH: redundant_pic_cnt", u1_redundant_pic_cnt); } /*--------------------------------------------------------------------*/ /* Check if the slice is part of new picture */ /*--------------------------------------------------------------------*/ /* First slice of a picture is always considered as part of new picture */ i1_is_end_of_poc = 1; ps_dec->ps_dec_err_status->u1_err_flag &= MASK_REJECT_CUR_PIC; if(ps_dec->u4_first_slice_in_pic != 2) { i1_is_end_of_poc = ih264d_is_end_of_pic(u2_frame_num, u1_nal_ref_idc, &s_tmp_poc, &ps_dec->s_cur_pic_poc, ps_cur_slice, u1_pic_order_cnt_type, u1_nal_unit_type, u4_idr_pic_id, u1_field_pic_flag, u1_bottom_field_flag); if(i1_is_end_of_poc) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } } /*--------------------------------------------------------------------*/ /* Check for error in slice and parse the missing/corrupted MB's */ /* as skip-MB's in an inserted P-slice */ /*--------------------------------------------------------------------*/ u1_mbaff = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); prev_slice_err = 0; if(i1_is_end_of_poc || ps_dec->u1_first_slice_in_stream) { if(u2_frame_num != ps_dec->u2_prv_frame_num && ps_dec->u1_top_bottom_decoded != 0 && ps_dec->u1_top_bottom_decoded != (TOP_FIELD_ONLY | BOT_FIELD_ONLY)) { ps_dec->u1_dangling_field = 1; if(ps_dec->u4_first_slice_in_pic) { prev_slice_err = 1; } else { prev_slice_err = 2; } if(ps_dec->u1_top_bottom_decoded ==TOP_FIELD_ONLY) ps_cur_slice->u1_bottom_field_flag = 1; else ps_cur_slice->u1_bottom_field_flag = 0; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &ps_dec->s_cur_pic_poc; u1_is_idr_slice = ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL; } else if(ps_dec->u4_first_slice_in_pic == 2) { if(u2_first_mb_in_slice > 0) { prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; ps_cur_poc = &s_tmp_poc; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); } } else { if(ps_dec->u4_first_slice_in_pic) { /* if valid slice header is not decoded do 
start of pic processing * since in the current process call, frame num is not updated in the slice structure yet * ih264d_is_end_of_pic is checked with valid frame num of previous process call, * although i1_is_end_of_poc is set there could be more slices in the frame, * so conceal only till cur slice */ prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; } else { /* since i1_is_end_of_poc is set ,means new frame num is encountered. so conceal the current frame * completely */ prev_slice_err = 2; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; } ps_cur_poc = &s_tmp_poc; } } else { if((u2_first_mb_in_slice << u1_mbaff) > ps_dec->u2_total_mbs_coded) { prev_slice_err = 2; num_mb_skipped = (u2_first_mb_in_slice << u1_mbaff) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } else if((u2_first_mb_in_slice << u1_mbaff) < ps_dec->u2_total_mbs_coded) { return ERROR_CORRUPTED_SLICE; } } if(prev_slice_err) { ret = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, u1_is_idr_slice, u2_frame_num, ps_cur_poc, prev_slice_err); if(ps_dec->u1_dangling_field == 1) { ps_dec->u1_second_field = 1 - ps_dec->u1_second_field; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_dec->u2_prv_frame_num = u2_frame_num; ps_dec->u1_first_slice_in_stream = 0; return ERROR_DANGLING_FIELD_IN_PIC; } if(prev_slice_err == 2) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { /* return if all MBs in frame are parsed*/ ps_dec->u1_first_slice_in_stream = 0; return ERROR_IN_LAST_SLICE_OF_PIC; } if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return ERROR_NEW_FRAME_EXPECTED; } if(ret != OK) return ret; i1_is_end_of_poc = 0; } if (ps_dec->u4_first_slice_in_pic == 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } if((ps_dec->u1_separate_parse == 0) && (ps_dec->u4_first_slice_in_pic == 0)) { ps_dec->ps_decode_cur_slice++; } ps_dec->u1_slice_header_done = 0; if(u1_field_pic_flag) { ps_dec->u2_prv_frame_num = u2_frame_num; } if(ps_cur_slice->u1_mmco_equalto5) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; if(!ps_cur_slice->u1_field_pic_flag) // or a complementary field pair { i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); } else if(!ps_cur_slice->u1_bottom_field_flag) i4_temp_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; else i4_temp_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_top_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_poc = i4_temp_poc; ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } if(ps_dec->u4_first_slice_in_pic == 2) { ret = ih264d_decode_pic_order_cnt(u1_is_idr_slice, u2_frame_num, &ps_dec->s_prev_pic_poc, &s_tmp_poc, ps_cur_slice, ps_pps, u1_nal_ref_idc, u1_bottom_field_flag, u1_field_pic_flag, &i4_poc); if(ret != OK) return ret; /* Display seq no calculations */ if(i4_poc >= ps_dec->i4_max_poc) ps_dec->i4_max_poc = i4_poc; /* IDR Picture or POC wrap around */ if(i4_poc == 0) { ps_dec->i4_prev_max_display_seq = ps_dec->i4_prev_max_display_seq + ps_dec->i4_max_poc + 
ps_dec->u1_max_dec_frame_buffering + 1; ps_dec->i4_max_poc = 0; } } /*--------------------------------------------------------------------*/ /* Copy the values read from the bitstream to the slice header and then*/ /* If the slice is first slice in picture, then do Start of Picture */ /* processing. */ /*--------------------------------------------------------------------*/ ps_cur_slice->i4_delta_pic_order_cnt[0] = i_delta_poc[0]; ps_cur_slice->i4_delta_pic_order_cnt[1] = i_delta_poc[1]; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u2_first_mb_in_slice = u2_first_mb_in_slice; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->u1_slice_type = u1_slice_type; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; if(ps_seq->u1_frame_mbs_only_flag) ps_cur_slice->u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; else ps_cur_slice->u1_direct_8x8_inference_flag = 1; if(u1_slice_type == B_SLICE) { ps_cur_slice->u1_direct_spatial_mv_pred_flag = ih264d_get_bit_h264( ps_bitstrm); COPYTHECONTEXT("SH: direct_spatial_mv_pred_flag", ps_cur_slice->u1_direct_spatial_mv_pred_flag); if(ps_cur_slice->u1_direct_spatial_mv_pred_flag) ps_cur_slice->pf_decodeDirect = ih264d_decode_spatial_direct; else ps_cur_slice->pf_decodeDirect = ih264d_decode_temporal_direct; if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaffB; } else { if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; } if(ps_dec->u4_first_slice_in_pic == 2) { if(u2_first_mb_in_slice == 0) { ret = ih264d_start_of_pic(ps_dec, i4_poc, &s_tmp_poc, u2_frame_num, ps_pps); if(ret != OK) return ret; } ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } /* INITIALIZATION of fn ptrs for MC and formMbPartInfo functions */ { UWORD8 uc_nofield_nombaff; uc_nofield_nombaff = ((ps_dec->ps_cur_slice->u1_field_pic_flag == 0) && (ps_dec->ps_cur_slice->u1_mbaff_frame_flag == 0) && (u1_slice_type != B_SLICE) && (ps_dec->ps_cur_pps->u1_wted_pred_flag == 0)); /* Initialise MC and formMbPartInfo fn ptrs one time based on profile_idc */ if(uc_nofield_nombaff) { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; } else { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_mp; 
ps_dec->p_motion_compensate = ih264d_motion_compensate_mp; } } /* * Decide whether to decode the current picture or not */ { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_err->u4_frm_sei_sync == u2_frame_num) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; ps_err->u4_frm_sei_sync = SYNC_FRM_DEFAULT; } ps_err->u4_cur_frm = u2_frame_num; } /* Decision for decoding if the picture is to be skipped */ { WORD32 i4_skip_b_pic, i4_skip_p_pic; i4_skip_b_pic = (ps_dec->u4_skip_frm_mask & B_SLC_BIT) && (B_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); i4_skip_p_pic = (ps_dec->u4_skip_frm_mask & P_SLC_BIT) && (P_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); /**************************************************************/ /* Skip the B picture if skip mask is set for B picture and */ /* Current B picture is a non reference B picture or there is */ /* no user for reference B picture */ /**************************************************************/ if(i4_skip_b_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; /* Don't decode the picture in SKIP-B mode if that picture is B */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } /**************************************************************/ /* Skip the P picture if skip mask is set for P picture and */ /* Current P picture is a non reference P picture or there is */ /* no user for reference P picture */ /**************************************************************/ if(i4_skip_p_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; /* Don't decode the picture in SKIP-P mode if that picture is P */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } } { UWORD16 u2_mb_x, u2_mb_y; ps_dec->i4_submb_ofst = ((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) * SUB_BLK_SIZE) - SUB_BLK_SIZE; if(u2_first_mb_in_slice) { UWORD8 u1_mb_aff; UWORD8 u1_field_pic; UWORD16 u2_frm_wd_in_mbs; u2_frm_wd_in_mbs = ps_seq->u2_frm_wd_in_mbs; u1_mb_aff = ps_cur_slice->u1_mbaff_frame_flag; u1_field_pic = ps_cur_slice->u1_field_pic_flag; { UWORD32 x_offset; UWORD32 y_offset; UWORD32 u4_frame_stride; tfr_ctxt_t *ps_trns_addr; // = &ps_dec->s_tran_addrecon_parse; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = MOD(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y = DIV(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y <<= u1_mb_aff; if((u2_mb_x > u2_frm_wd_in_mbs - 1) || (u2_mb_y > ps_dec->u2_frm_ht_in_mbs - 1)) { return ERROR_CORRUPTED_SLICE; } u4_frame_stride = ps_dec->u2_frm_wd_y << u1_field_pic; x_offset = u2_mb_x << 4; y_offset = (u2_mb_y * u4_frame_stride) << 4; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1 + x_offset + y_offset; u4_frame_stride = ps_dec->u2_frm_wd_uv << u1_field_pic; x_offset >>= 1; y_offset = (u2_mb_y * u4_frame_stride) << 3; x_offset *= YUV420SP_FACTOR; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2 + x_offset + y_offset; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3 + x_offset + y_offset; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; if(ps_dec->u1_separate_parse == 1) { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } else { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } ps_dec->u2_cur_mb_addr = (u2_first_mb_in_slice 
<< u1_mb_aff); ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv + ((u2_first_mb_in_slice << u1_mb_aff) << 4); } } else { tfr_ctxt_t *ps_trns_addr; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = 0xffff; u2_mb_y = 0; ps_dec->u2_cur_mb_addr = 0; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; } ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= ps_cur_slice->u1_mbaff_frame_flag; ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; } /* RBSP stop bit is used for CABAC decoding*/ ps_bitstrm->u4_max_ofst += ps_dec->ps_cur_pps->u1_entropy_coding_mode; ps_dec->u1_B = (u1_slice_type == B_SLICE); ps_dec->u4_next_mb_skip = 0; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->ps_cur_slice->u2_first_mb_in_slice; ps_dec->ps_parse_cur_slice->slice_type = ps_dec->ps_cur_slice->u1_slice_type; ps_dec->u4_start_recon_deblk = 1; { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = ( void *)pu1_buf; } if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } if(u1_slice_type == I_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= I_SLC_BIT; ret = ih264d_parse_islice(ps_dec, u2_first_mb_in_slice); if(ps_dec->i4_pic_type != B_SLICE && ps_dec->i4_pic_type != P_SLICE) ps_dec->i4_pic_type = I_SLICE; } else if(u1_slice_type == P_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; ret = ih264d_parse_pslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; if(ps_dec->i4_pic_type != B_SLICE) ps_dec->i4_pic_type = P_SLICE; } else if(u1_slice_type == B_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; ret = ih264d_parse_bslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; ps_dec->i4_pic_type = B_SLICE; } else return ERROR_INV_SLC_TYPE_T; if(ps_dec->u1_slice_header_done) { /* set to zero to indicate a valid slice has been decoded */ /* first slice header successfully decoded */ ps_dec->u4_first_slice_in_pic = 0; ps_dec->u1_first_slice_in_stream = 0; } if(ret != OK) return ret; /* storing last Mb X and MbY of the slice */ ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; /* End of Picture detection */ if(ps_dec->u2_total_mbs_coded >= (ps_seq->u2_max_mb_addr + 1)) { ps_dec->u1_pic_decode_done = 1; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if((ps_err->u1_err_flag & REJECT_PB_PICS) && (ps_err->u1_cur_pic_type == PIC_TYPE_I)) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } PRINT_BIN_BIT_RATIO(ps_dec) return 
ret; }
C
Android
1
CVE-2018-17942
https://www.cvedetails.com/cve/CVE-2018-17942/
CWE-119
https://github.com/coreutils/gnulib/commit/278b4175c9d7dd47c1a3071554aac02add3b3c35
278b4175c9d7dd47c1a3071554aac02add3b3c35
vasnprintf: Fix heap memory overrun bug. Reported by Ben Pfaff <blp@cs.stanford.edu> in <https://lists.gnu.org/archive/html/bug-gnulib/2018-09/msg00107.html>. * lib/vasnprintf.c (convert_to_decimal): Allocate one more byte of memory. * tests/test-vasnprintf.c (test_function): Add another test.
floorlog10 (double x) { int exp; double y; double z; double l; /* Split into exponential part and mantissa. */ y = frexp (x, &exp); if (!(y >= 0.0 && y < 1.0)) abort (); if (y == 0.0) return INT_MIN; if (y < 0.5) { while (y < (1.0 / (1 << (GMP_LIMB_BITS / 2)) / (1 << (GMP_LIMB_BITS / 2)))) { y *= 1.0 * (1 << (GMP_LIMB_BITS / 2)) * (1 << (GMP_LIMB_BITS / 2)); exp -= GMP_LIMB_BITS; } if (y < (1.0 / (1 << 16))) { y *= 1.0 * (1 << 16); exp -= 16; } if (y < (1.0 / (1 << 8))) { y *= 1.0 * (1 << 8); exp -= 8; } if (y < (1.0 / (1 << 4))) { y *= 1.0 * (1 << 4); exp -= 4; } if (y < (1.0 / (1 << 2))) { y *= 1.0 * (1 << 2); exp -= 2; } if (y < (1.0 / (1 << 1))) { y *= 1.0 * (1 << 1); exp -= 1; } } if (!(y >= 0.5 && y < 1.0)) abort (); /* Compute an approximation for l = log2(x) = exp + log2(y). */ l = exp; z = y; if (z < 0.70710678118654752444) { z *= 1.4142135623730950488; l -= 0.5; } if (z < 0.8408964152537145431) { z *= 1.1892071150027210667; l -= 0.25; } if (z < 0.91700404320467123175) { z *= 1.0905077326652576592; l -= 0.125; } if (z < 0.9576032806985736469) { z *= 1.0442737824274138403; l -= 0.0625; } /* Now 0.95 <= z <= 1.01. */ z = 1 - z; /* log2(1-z) = 1/log(2) * (- z - z^2/2 - z^3/3 - z^4/4 - ...) Four terms are enough to get an approximation with error < 10^-7. */ l -= 1.4426950408889634074 * z * (1.0 + z * (0.5 + z * ((1.0 / 3) + z * 0.25))); /* Finally multiply with log(2)/log(10), yields an approximation for log10(x). */ l *= 0.30102999566398119523; /* Round down to the next integer. */ return (int) l + (l < 0 ? -1 : 0); }
floorlog10 (double x) { int exp; double y; double z; double l; /* Split into exponential part and mantissa. */ y = frexp (x, &exp); if (!(y >= 0.0 && y < 1.0)) abort (); if (y == 0.0) return INT_MIN; if (y < 0.5) { while (y < (1.0 / (1 << (GMP_LIMB_BITS / 2)) / (1 << (GMP_LIMB_BITS / 2)))) { y *= 1.0 * (1 << (GMP_LIMB_BITS / 2)) * (1 << (GMP_LIMB_BITS / 2)); exp -= GMP_LIMB_BITS; } if (y < (1.0 / (1 << 16))) { y *= 1.0 * (1 << 16); exp -= 16; } if (y < (1.0 / (1 << 8))) { y *= 1.0 * (1 << 8); exp -= 8; } if (y < (1.0 / (1 << 4))) { y *= 1.0 * (1 << 4); exp -= 4; } if (y < (1.0 / (1 << 2))) { y *= 1.0 * (1 << 2); exp -= 2; } if (y < (1.0 / (1 << 1))) { y *= 1.0 * (1 << 1); exp -= 1; } } if (!(y >= 0.5 && y < 1.0)) abort (); /* Compute an approximation for l = log2(x) = exp + log2(y). */ l = exp; z = y; if (z < 0.70710678118654752444) { z *= 1.4142135623730950488; l -= 0.5; } if (z < 0.8408964152537145431) { z *= 1.1892071150027210667; l -= 0.25; } if (z < 0.91700404320467123175) { z *= 1.0905077326652576592; l -= 0.125; } if (z < 0.9576032806985736469) { z *= 1.0442737824274138403; l -= 0.0625; } /* Now 0.95 <= z <= 1.01. */ z = 1 - z; /* log2(1-z) = 1/log(2) * (- z - z^2/2 - z^3/3 - z^4/4 - ...) Four terms are enough to get an approximation with error < 10^-7. */ l -= 1.4426950408889634074 * z * (1.0 + z * (0.5 + z * ((1.0 / 3) + z * 0.25))); /* Finally multiply with log(2)/log(10), yields an approximation for log10(x). */ l *= 0.30102999566398119523; /* Round down to the next integer. */ return (int) l + (l < 0 ? -1 : 0); }
C
gnulib
0
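The CVE-2018-17942 commit message in the row above describes a heap overrun in convert_to_decimal that was fixed by allocating one more byte; the floorlog10 helper captured in this row is not the function the patch touches. As a minimal, self-contained sketch of that bug class (hypothetical names, not the gnulib implementation): a decimal-conversion buffer must hold every digit plus the terminating NUL, and omitting the "+ 1" writes one byte past the end of the heap block.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical illustration of the bug class described in the commit:
 * the output buffer needs room for all digits PLUS the trailing '\0'.
 * Allocating only `digits` bytes lets the NUL land one byte past the
 * end of the heap block (heap memory overrun).                        */
static char *to_decimal(unsigned long value)
{
    /* count the digits */
    size_t digits = 1;
    for (unsigned long v = value; v >= 10; v /= 10)
        digits++;

    /* FIX: '+ 1' reserves room for the terminating NUL.
     * The buggy variant would allocate only `digits` bytes.           */
    char *buf = malloc(digits + 1);
    if (buf == NULL)
        return NULL;

    snprintf(buf, digits + 1, "%lu", value);  /* writes digits + NUL   */
    return buf;
}

int main(void)
{
    char *s = to_decimal(1234567890UL);
    if (s) {
        printf("%s\n", s);
        free(s);
    }
    return 0;
}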
CVE-2018-19045
https://www.cvedetails.com/cve/CVE-2018-19045/
CWE-200
https://github.com/acassen/keepalived/commit/c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
Add command line and configuration option to set umask Issue #1048 identified that files created by keepalived are created with mode 0666. This commit changes the default to 0644, and also allows the umask to be specified in the configuration or as a command line option. Signed-off-by: Quentin Armitage <quentin@armitage.org.uk>
dbus_service_name_handler(vector_t *strvec) { FREE_PTR(global_data->dbus_service_name); global_data->dbus_service_name = set_value(strvec); }
dbus_service_name_handler(vector_t *strvec) { FREE_PTR(global_data->dbus_service_name); global_data->dbus_service_name = set_value(strvec); }
C
keepalived
0
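The CVE-2018-19045 commit message above describes lowering keepalived's default file-creation mode from 0666 to 0644 and making the umask configurable; the dbus_service_name_handler captured in this row does not show that change. A hedged sketch of the underlying POSIX mechanism only (the path and values are illustrative, not keepalived code): the kernel applies effective mode = requested mode & ~umask, so both the requested mode and the process umask matter.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    /* The fix described in the commit both lowered the requested mode
     * (0666 -> 0644) and allowed the umask itself to be configured,
     * so created files are not world-writable regardless of what the
     * daemon inherited at startup.  0022 here is an illustrative value. */
    umask(022);

    /* Effective permission bits: 0644 & ~022 = 0644 (rw-r--r--). */
    int fd = open("/tmp/example_state_file",
                  O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    char line[64];
    int n = snprintf(line, sizeof line, "pid=%ld\n", (long) getpid());
    if (n > 0)
        write(fd, line, (size_t) n);
    close(fd);
    return 0;
}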
CVE-2017-5120
https://www.cvedetails.com/cve/CVE-2017-5120/
null
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
b7277af490d28ac7f802c015bb0ff31395768556
bindings: Support "attribute FrozenArray<T>?" Adds a quick hack to support a case of "attribute FrozenArray<T>?". Bug: 1028047 Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866 Reviewed-by: Hitoshi Yoshida <peria@chromium.org> Commit-Queue: Yuki Shiino <yukishiino@chromium.org> Cr-Commit-Position: refs/heads/master@{#718676}
static void RuntimeEnabledOverloadedVoidMethod1Method(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); V8StringResource<> string_arg; string_arg = info[0]; if (!string_arg.Prepare()) return; impl->runtimeEnabledOverloadedVoidMethod(string_arg); }
static void RuntimeEnabledOverloadedVoidMethod1Method(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); V8StringResource<> string_arg; string_arg = info[0]; if (!string_arg.Prepare()) return; impl->runtimeEnabledOverloadedVoidMethod(string_arg); }
C
Chrome
0
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/1266ba494530a267ec8a21442ea1b5cae94da4fb
1266ba494530a267ec8a21442ea1b5cae94da4fb
Introduce XGetImage() for GrabWindowSnapshot() in ChromeOS. BUG=119492 TEST=manually done Review URL: https://chromiumcodereview.appspot.com/10386124 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137556 0039d316-1c4b-4281-b951-d872f2087c98
void RootWindow::MoveCursorTo(const gfx::Point& location_in_dip) { host_->MoveCursorTo(ui::ConvertPointToPixel(layer(), location_in_dip)); }
void RootWindow::MoveCursorTo(const gfx::Point& location_in_dip) { host_->MoveCursorTo(ui::ConvertPointToPixel(layer(), location_in_dip)); }
C
Chrome
0
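The commit message above only states that GrabWindowSnapshot() on ChromeOS was reworked to use XGetImage(); the RootWindow::MoveCursorTo function stored in this row does not show that work. A minimal standalone Xlib sketch of grabbing window pixels with XGetImage (assumes an X display is available and is not Chromium code; build with -lX11):

#include <stdio.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>

int main(void)
{
    Display *dpy = XOpenDisplay(NULL);        /* uses $DISPLAY */
    if (dpy == NULL) {
        fprintf(stderr, "cannot open display\n");
        return 1;
    }

    Window root = DefaultRootWindow(dpy);
    XWindowAttributes attr;
    XGetWindowAttributes(dpy, root, &attr);

    /* Pull the window contents into client memory as a ZPixmap image. */
    XImage *img = XGetImage(dpy, root, 0, 0,
                            (unsigned int) attr.width,
                            (unsigned int) attr.height,
                            AllPlanes, ZPixmap);
    if (img != NULL) {
        printf("snapshot: %dx%d, %d bits per pixel\n",
               img->width, img->height, img->bits_per_pixel);
        XDestroyImage(img);
    }

    XCloseDisplay(dpy);
    return 0;
}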
null
null
null
https://github.com/chromium/chromium/commit/511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3
511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3
Implement new websocket handshake based on draft-hixie-thewebsocketprotocol-76 BUG=none TEST=net_unittests passes Review URL: http://codereview.chromium.org/1108002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42736 0039d316-1c4b-4281-b951-d872f2087c98
std::string WebSocketHandshake::CreateClientHandshakeMessage() const { std::string WebSocketHandshake::CreateClientHandshakeMessage() { if (!parameter_.get()) { parameter_.reset(new Parameter); parameter_->GenerateKeys(); } std::string msg; // WebSocket protocol 4.1 Opening handshake. msg = "GET "; msg += GetResourceName(); msg += " HTTP/1.1\r\n"; std::vector<std::string> fields; fields.push_back("Upgrade: WebSocket"); fields.push_back("Connection: Upgrade"); fields.push_back("Host: " + GetHostFieldValue()); fields.push_back("Origin: " + GetOriginFieldValue()); if (!protocol_.empty()) fields.push_back("Sec-WebSocket-Protocol: " + protocol_); // TODO(ukai): Add cookie if necessary. fields.push_back("Sec-WebSocket-Key1: " + parameter_->GetSecWebSocketKey1()); fields.push_back("Sec-WebSocket-Key2: " + parameter_->GetSecWebSocketKey2()); std::random_shuffle(fields.begin(), fields.end()); for (size_t i = 0; i < fields.size(); i++) { msg += fields[i] + "\r\n"; } msg += "\r\n"; msg.append(parameter_->GetKey3()); return msg; } int WebSocketHandshake::ReadServerHandshake(const char* data, size_t len) { mode_ = MODE_INCOMPLETE; int eoh = HttpUtil::LocateEndOfHeaders(data, len); if (eoh < 0) return -1; scoped_refptr<HttpResponseHeaders> headers( new HttpResponseHeaders(HttpUtil::AssembleRawHeaders(data, eoh))); if (headers->response_code() != 101) { mode_ = MODE_FAILED; DLOG(INFO) << "Bad response code: " << headers->response_code(); return eoh; } mode_ = MODE_NORMAL; if (!ProcessHeaders(*headers) || !CheckResponseHeaders()) { DLOG(INFO) << "Process Headers failed: " << std::string(data, eoh); mode_ = MODE_FAILED; return eoh; } if (len < static_cast<size_t>(eoh + Parameter::kExpectedResponseSize)) { mode_ = MODE_INCOMPLETE; return -1; } uint8 expected[Parameter::kExpectedResponseSize]; parameter_->GetExpectedResponse(expected); if (memcmp(&data[eoh], expected, Parameter::kExpectedResponseSize)) { mode_ = MODE_FAILED; return eoh + Parameter::kExpectedResponseSize; } mode_ = MODE_CONNECTED; return eoh + Parameter::kExpectedResponseSize; } std::string WebSocketHandshake::GetResourceName() const { std::string resource_name = url_.path(); if (url_.has_query()) { resource_name += "?"; resource_name += url_.query(); } return resource_name; } std::string WebSocketHandshake::GetHostFieldValue() const { // url_.host() is expected to be encoded in punnycode here. std::string host = StringToLowerASCII(url_.host()); if (url_.has_port()) { bool secure = is_secure(); int port = url_.EffectiveIntPort(); if ((!secure && port != kWebSocketPort && port != url_parse::PORT_UNSPECIFIED) || (secure && port != kSecureWebSocketPort && port != url_parse::PORT_UNSPECIFIED)) { host += ":"; host += IntToString(port); } } return host; } std::string WebSocketHandshake::GetOriginFieldValue() const { return StringToLowerASCII(origin_); }
std::string WebSocketHandshake::CreateClientHandshakeMessage() const { std::string msg; msg = "GET "; msg += url_.path(); if (url_.has_query()) { msg += "?"; msg += url_.query(); } msg += " HTTP/1.1\r\n"; msg += kUpgradeHeader; msg += kConnectionHeader; msg += "Host: "; msg += StringToLowerASCII(url_.host()); if (url_.has_port()) { bool secure = is_secure(); int port = url_.EffectiveIntPort(); if ((!secure && port != kWebSocketPort && port != url_parse::PORT_UNSPECIFIED) || (secure && port != kSecureWebSocketPort && port != url_parse::PORT_UNSPECIFIED)) { msg += ":"; msg += IntToString(port); } } msg += "\r\n"; msg += "Origin: "; msg += StringToLowerASCII(origin_); msg += "\r\n"; if (!protocol_.empty()) { msg += "WebSocket-Protocol: "; msg += protocol_; msg += "\r\n"; } msg += "\r\n"; return msg; }
C
Chrome
1
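The func_after column in the row above builds the hixie-76 client handshake but delegates key generation and the expected-response check to a Parameter helper that is not included in the row. As a rough, self-contained illustration of the hixie-76 challenge derivation (digits of Sec-WebSocket-Key1/2 concatenated and divided by the number of spaces, packed big-endian, followed by the 8-byte key3; the server's reply is the MD5 of those 16 bytes, which is only noted in a comment here). The header strings are sample values, not taken from this row:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode one hixie-76 key header: concatenate its digits, then divide
 * by the number of space characters it contains.                      */
static uint32_t decode_key(const char *key)
{
    uint64_t number = 0;
    uint32_t spaces = 0;
    for (const char *p = key; *p; p++) {
        if (isdigit((unsigned char) *p))
            number = number * 10 + (uint64_t)(*p - '0');
        else if (*p == ' ')
            spaces++;
    }
    return spaces ? (uint32_t)(number / spaces) : 0;
}

int main(void)
{
    const char *key1 = "4 @1  46546xW%0l 1 5";
    const char *key2 = "12998 5 Y3 1  .P00";
    const char  key3[8] = "^n:ds[4U";      /* 8 arbitrary body bytes   */

    uint32_t n1 = decode_key(key1);        /* 4146546015 / 5 = 829309203 */
    uint32_t n2 = decode_key(key2);        /* 1299853100 / 5 = 259970620 */

    /* 16-byte challenge: n1 and n2 big-endian, then key3.
     * The server must answer with the 16 raw bytes of MD5(challenge). */
    unsigned char challenge[16];
    challenge[0] = (unsigned char)(n1 >> 24);
    challenge[1] = (unsigned char)(n1 >> 16);
    challenge[2] = (unsigned char)(n1 >> 8);
    challenge[3] = (unsigned char)(n1);
    challenge[4] = (unsigned char)(n2 >> 24);
    challenge[5] = (unsigned char)(n2 >> 16);
    challenge[6] = (unsigned char)(n2 >> 8);
    challenge[7] = (unsigned char)(n2);
    memcpy(challenge + 8, key3, 8);

    for (size_t i = 0; i < sizeof(challenge); i++)
        printf("%02x", challenge[i]);
    putchar('\n');
    return 0;
}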
CVE-2017-5330
https://www.cvedetails.com/cve/CVE-2017-5330/
CWE-78
https://cgit.kde.org/ark.git/commit/?id=82fdfd24d46966a117fa625b68784735a40f9065
82fdfd24d46966a117fa625b68784735a40f9065
null
void Part::slotToggleInfoPanel(bool visible) { if (visible) { m_splitter->setSizes(ArkSettings::splitterSizes()); m_infoPanel->show(); } else { ArkSettings::setSplitterSizes(m_splitter->sizes()); m_infoPanel->hide(); } }
void Part::slotToggleInfoPanel(bool visible) { if (visible) { m_splitter->setSizes(ArkSettings::splitterSizes()); m_infoPanel->show(); } else { ArkSettings::setSplitterSizes(m_splitter->sizes()); m_infoPanel->hide(); } }
CPP
kde
0
CVE-2018-9490
https://www.cvedetails.com/cve/CVE-2018-9490/
CWE-704
https://android.googlesource.com/platform/external/v8/+/a24543157ae2cdd25da43e20f4e48a07481e6ceb
a24543157ae2cdd25da43e20f4e48a07481e6ceb
Backport: Fix Object.entries/values with changing elements Bug: 111274046 Test: m -j proxy_resolver_v8_unittest && adb sync && adb shell \ /data/nativetest64/proxy_resolver_v8_unittest/proxy_resolver_v8_unittest Change-Id: I705fc512cc5837e9364ed187559cc75d079aa5cb (cherry picked from commit d8be9a10287afed07705ac8af027d6a46d4def99)
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { FixedArray* parameter_map = FixedArray::cast(obj->elements()); uint32_t length = static_cast<uint32_t>(parameter_map->length()) - 2; if (entry < length) { parameter_map->set_the_hole(entry + 2); } else { Subclass::DeleteFromArguments(obj, entry - length); } }
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { FixedArray* parameter_map = FixedArray::cast(obj->elements()); uint32_t length = static_cast<uint32_t>(parameter_map->length()) - 2; if (entry < length) { parameter_map->set_the_hole(entry + 2); } else { Subclass::DeleteFromArguments(obj, entry - length); } }
C
Android
0
CVE-2017-7541
https://www.cvedetails.com/cve/CVE-2017-7541/
CWE-119
https://github.com/torvalds/linux/commit/8f44c9a41386729fea410e688959ddaa9d51be7c
8f44c9a41386729fea410e688959ddaa9d51be7c
brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx() The lower level nl80211 code in cfg80211 ensures that "len" is between 25 and NL80211_ATTR_FRAME (2304). We subtract DOT11_MGMT_HDR_LEN (24) from "len" so that's a max of 2280. However, the action_frame->data[] buffer is only BRCMF_FIL_ACTION_FRAME_SIZE (1800) bytes long so this memcpy() can overflow. memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], le16_to_cpu(action_frame->len)); Cc: stable@vger.kernel.org # 3.9.x Fixes: 18e2f61db3b70 ("brcmfmac: P2P action frame tx.") Reported-by: "freenerguo(郭大兴)" <freenerguo@tencent.com> Signed-off-by: Arend van Spriel <arend.vanspriel@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap, struct cfg80211_scan_request *request) { struct brcmf_cfg80211_info *cfg = ifp->drvr->config; int err; if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { if (cfg->int_escan_map) brcmf_dbg(SCAN, "aborting internal scan: map=%u\n", cfg->int_escan_map); /* Abort any on-going scan */ brcmf_abort_scanning(cfg); } brcmf_dbg(SCAN, "start internal scan: map=%u\n", fwmap); set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); cfg->escan_info.run = brcmf_run_escan; err = brcmf_do_escan(ifp, request); if (err) { clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); return err; } cfg->int_escan_map = fwmap; return 0; }
static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap, struct cfg80211_scan_request *request) { struct brcmf_cfg80211_info *cfg = ifp->drvr->config; int err; if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { if (cfg->int_escan_map) brcmf_dbg(SCAN, "aborting internal scan: map=%u\n", cfg->int_escan_map); /* Abort any on-going scan */ brcmf_abort_scanning(cfg); } brcmf_dbg(SCAN, "start internal scan: map=%u\n", fwmap); set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); cfg->escan_info.run = brcmf_run_escan; err = brcmf_do_escan(ifp, request); if (err) { clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); return err; } cfg->int_escan_map = fwmap; return 0; }
C
linux
0
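The CVE-2017-7541 commit message above pins the overflow down precisely: nl80211 allows "len" up to 2304, the driver copies len - 24 bytes, but the destination buffer holds only 1800. The brcmf_start_internal_escan function stored in this row is not the vulnerable brcmf_cfg80211_mgmt_tx. A generic, hedged sketch of the bounds check such a fix adds before the memcpy (hypothetical names and sizes mirroring the commit text, not the actual driver patch):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define MGMT_HDR_LEN        24   /* mirrors DOT11_MGMT_HDR_LEN            */
#define ACTION_FRAME_SIZE 1800   /* mirrors BRCMF_FIL_ACTION_FRAME_SIZE   */

struct action_frame {
    uint16_t len;
    uint8_t  data[ACTION_FRAME_SIZE];
};

/* Copy the management-frame payload, rejecting anything that would not
 * fit in the fixed-size destination.  Without the size check, a caller-
 * controlled len of up to 2304 overruns data[] by roughly 480 bytes.   */
static int copy_action_payload(struct action_frame *af,
                               const uint8_t *buf, size_t len)
{
    if (len < MGMT_HDR_LEN)
        return -EINVAL;

    size_t payload = len - MGMT_HDR_LEN;
    if (payload > ACTION_FRAME_SIZE)      /* the missing bounds check   */
        return -EINVAL;

    af->len = (uint16_t) payload;
    memcpy(af->data, buf + MGMT_HDR_LEN, payload);
    return 0;
}

int main(void)
{
    struct action_frame af;
    uint8_t frame[2304] = {0};

    /* 100-byte frame: accepted. 2304-byte frame: rejected, not copied. */
    int ok  = copy_action_payload(&af, frame, 100);
    int bad = copy_action_payload(&af, frame, sizeof(frame));
    return (ok == 0 && bad == -EINVAL) ? 0 : 1;
}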
CVE-2013-6630
https://www.cvedetails.com/cve/CVE-2013-6630/
CWE-189
https://github.com/chromium/chromium/commit/805eabb91d386c86bd64336c7643f6dfa864151d
805eabb91d386c86bd64336c7643f6dfa864151d
Convert ARRAYSIZE_UNSAFE -> arraysize in base/. R=thestig@chromium.org BUG=423134 Review URL: https://codereview.chromium.org/656033009 Cr-Commit-Position: refs/heads/master@{#299835}
bool CreatePairImpl(HANDLE* socket_a, HANDLE* socket_b, bool overlapped) { DCHECK_NE(socket_a, socket_b); DCHECK_EQ(*socket_a, SyncSocket::kInvalidHandle); DCHECK_EQ(*socket_b, SyncSocket::kInvalidHandle); wchar_t name[kPipePathMax]; ScopedHandle handle_a; DWORD flags = PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE; if (overlapped) flags |= FILE_FLAG_OVERLAPPED; do { unsigned int rnd_name; if (rand_s(&rnd_name) != 0) return false; swprintf(name, kPipePathMax, kPipeNameFormat, GetCurrentProcessId(), GetCurrentThreadId(), rnd_name); handle_a.Set(CreateNamedPipeW( name, flags, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE, 1, kOutBufferSize, kInBufferSize, kDefaultTimeoutMilliSeconds, NULL)); } while (!handle_a.IsValid() && (GetLastError() == ERROR_PIPE_BUSY)); if (!handle_a.IsValid()) { NOTREACHED(); return false; } flags = SECURITY_SQOS_PRESENT | SECURITY_ANONYMOUS; if (overlapped) flags |= FILE_FLAG_OVERLAPPED; ScopedHandle handle_b(CreateFileW(name, GENERIC_READ | GENERIC_WRITE, 0, // no sharing. NULL, // default security attributes. OPEN_EXISTING, // opens existing pipe. flags, NULL)); // no template file. if (!handle_b.IsValid()) { DPLOG(ERROR) << "CreateFileW failed"; return false; } if (!ConnectNamedPipe(handle_a.Get(), NULL)) { DWORD error = GetLastError(); if (error != ERROR_PIPE_CONNECTED) { DPLOG(ERROR) << "ConnectNamedPipe failed"; return false; } } *socket_a = handle_a.Take(); *socket_b = handle_b.Take(); return true; }
bool CreatePairImpl(HANDLE* socket_a, HANDLE* socket_b, bool overlapped) { DCHECK_NE(socket_a, socket_b); DCHECK_EQ(*socket_a, SyncSocket::kInvalidHandle); DCHECK_EQ(*socket_b, SyncSocket::kInvalidHandle); wchar_t name[kPipePathMax]; ScopedHandle handle_a; DWORD flags = PIPE_ACCESS_DUPLEX | FILE_FLAG_FIRST_PIPE_INSTANCE; if (overlapped) flags |= FILE_FLAG_OVERLAPPED; do { unsigned int rnd_name; if (rand_s(&rnd_name) != 0) return false; swprintf(name, kPipePathMax, kPipeNameFormat, GetCurrentProcessId(), GetCurrentThreadId(), rnd_name); handle_a.Set(CreateNamedPipeW( name, flags, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE, 1, kOutBufferSize, kInBufferSize, kDefaultTimeoutMilliSeconds, NULL)); } while (!handle_a.IsValid() && (GetLastError() == ERROR_PIPE_BUSY)); if (!handle_a.IsValid()) { NOTREACHED(); return false; } flags = SECURITY_SQOS_PRESENT | SECURITY_ANONYMOUS; if (overlapped) flags |= FILE_FLAG_OVERLAPPED; ScopedHandle handle_b(CreateFileW(name, GENERIC_READ | GENERIC_WRITE, 0, // no sharing. NULL, // default security attributes. OPEN_EXISTING, // opens existing pipe. flags, NULL)); // no template file. if (!handle_b.IsValid()) { DPLOG(ERROR) << "CreateFileW failed"; return false; } if (!ConnectNamedPipe(handle_a.Get(), NULL)) { DWORD error = GetLastError(); if (error != ERROR_PIPE_CONNECTED) { DPLOG(ERROR) << "ConnectNamedPipe failed"; return false; } } *socket_a = handle_a.Take(); *socket_b = handle_b.Take(); return true; }
C
Chrome
0
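Editor's note on the Chromium record above: ARRAYSIZE_UNSAFE is the classic sizeof-based element-count macro, and its weakness is that it compiles happily when handed a pointer instead of an array, silently returning a wrong count; the arraysize() replacement takes a reference to an array of statically known size, so a pointer argument fails to compile instead of miscounting. The snippet below only demonstrates the macro's failure mode in plain C for illustration; it is not Chromium code.

#include <stdio.h>

/* sizeof-based count: correct for true arrays, silently wrong for pointers. */
#define ARRAYSIZE_UNSAFE(a) (sizeof(a) / sizeof(*(a)))

static void takes_pointer(const int *p)
{
    /* 'p' has decayed to a pointer, so this is sizeof(int *) / sizeof(int),
     * typically 2 on a 64-bit build - not the real element count. */
    printf("inside function: %zu\n", ARRAYSIZE_UNSAFE(p));
}

int main(void)
{
    int values[16] = {0};

    printf("on the array:    %zu\n", ARRAYSIZE_UNSAFE(values)); /* 16 */
    takes_pointer(values);                                       /* 2, silently */
    return 0;
}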
CVE-2019-13225
https://www.cvedetails.com/cve/CVE-2019-13225/
CWE-476
https://github.com/kkos/oniguruma/commit/c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c
Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode.
compile_length_anchor_node(AnchorNode* node, regex_t* reg) { int len; int tlen = 0; if (IS_NOT_NULL(NODE_ANCHOR_BODY(node))) { tlen = compile_length_tree(NODE_ANCHOR_BODY(node), reg); if (tlen < 0) return tlen; } switch (node->type) { case ANCR_PREC_READ: len = SIZE_OP_PREC_READ_START + tlen + SIZE_OP_PREC_READ_END; break; case ANCR_PREC_READ_NOT: len = SIZE_OP_PREC_READ_NOT_START + tlen + SIZE_OP_PREC_READ_NOT_END; break; case ANCR_LOOK_BEHIND: len = SIZE_OP_LOOK_BEHIND + tlen; break; case ANCR_LOOK_BEHIND_NOT: len = SIZE_OP_LOOK_BEHIND_NOT_START + tlen + SIZE_OP_LOOK_BEHIND_NOT_END; break; case ANCR_WORD_BOUNDARY: case ANCR_NO_WORD_BOUNDARY: #ifdef USE_WORD_BEGIN_END case ANCR_WORD_BEGIN: case ANCR_WORD_END: #endif len = SIZE_OP_WORD_BOUNDARY; break; case ANCR_TEXT_SEGMENT_BOUNDARY: case ANCR_NO_TEXT_SEGMENT_BOUNDARY: len = SIZE_OPCODE; break; default: len = SIZE_OPCODE; break; } return len; }
compile_length_anchor_node(AnchorNode* node, regex_t* reg) { int len; int tlen = 0; if (IS_NOT_NULL(NODE_ANCHOR_BODY(node))) { tlen = compile_length_tree(NODE_ANCHOR_BODY(node), reg); if (tlen < 0) return tlen; } switch (node->type) { case ANCR_PREC_READ: len = SIZE_OP_PREC_READ_START + tlen + SIZE_OP_PREC_READ_END; break; case ANCR_PREC_READ_NOT: len = SIZE_OP_PREC_READ_NOT_START + tlen + SIZE_OP_PREC_READ_NOT_END; break; case ANCR_LOOK_BEHIND: len = SIZE_OP_LOOK_BEHIND + tlen; break; case ANCR_LOOK_BEHIND_NOT: len = SIZE_OP_LOOK_BEHIND_NOT_START + tlen + SIZE_OP_LOOK_BEHIND_NOT_END; break; case ANCR_WORD_BOUNDARY: case ANCR_NO_WORD_BOUNDARY: #ifdef USE_WORD_BEGIN_END case ANCR_WORD_BEGIN: case ANCR_WORD_END: #endif len = SIZE_OP_WORD_BOUNDARY; break; case ANCR_TEXT_SEGMENT_BOUNDARY: case ANCR_NO_TEXT_SEGMENT_BOUNDARY: len = SIZE_OPCODE; break; default: len = SIZE_OPCODE; break; } return len; }
C
oniguruma
0
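Editor's note on the oniguruma record above (CVE-2019-13225, CWE-476): the bug class is a NULL pointer dereference while turning an if-then-else pattern into bytecode, where one branch of the construct can legitimately be absent and code that assumes every sub-node is present will crash on such input. The function shown already guards the optional body with IS_NOT_NULL(); the fragment below is a generic illustration of that guard pattern using made-up names (OptNode, node_len), not the actual oniguruma fix.

#include <stddef.h>

/* Hypothetical node with an optional else branch (names are illustrative). */
typedef struct OptNode {
    struct OptNode *then_branch;
    struct OptNode *else_branch;   /* may be NULL for (?(cond)then) */
    int own_len;
} OptNode;

/* Length calculation that tolerates a missing branch instead of
 * unconditionally dereferencing it. */
static int node_len(const OptNode *n)
{
    if (n == NULL)
        return 0;                  /* an absent branch contributes nothing */

    return n->own_len
         + node_len(n->then_branch)
         + node_len(n->else_branch);
}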
CVE-2016-2464
https://www.cvedetails.com/cve/CVE-2016-2464/
CWE-20
https://android.googlesource.com/platform/external/libvpx/+/cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
long Segment::DoLoadCluster(long long& pos, long& len) { if (m_pos < 0) return DoLoadClusterUnknownSize(pos, len); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; if (total >= 0 && avail > total) return E_FILE_FORMAT_INVALID; const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long cluster_off = -1; // offset relative to start of segment long long cluster_size = -1; // size of cluster payload for (;;) { if ((total >= 0) && (m_pos >= total)) return 1; // no more clusters if ((segment_stop >= 0) && (m_pos >= segment_stop)) return 1; // no more clusters pos = m_pos; if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadID(m_pReader, idpos, len); if (id < 0) return E_FILE_FORMAT_INVALID; pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element if (size == 0) { // weird m_pos = pos; continue; } const long long unknown_size = (1LL << (7 * len)) - 1; if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize if (m_pCues == NULL) { const long long element_size = (pos - idpos) + size; m_pCues = new (std::nothrow) Cues(this, pos, size, idpos, element_size); if (m_pCues == NULL) return -1; } m_pos = pos + size; // consume payload continue; } if (id != 0x0F43B675) { // Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize m_pos = pos + size; // consume payload continue; } cluster_off = idpos - m_start; // relative pos if (size != unknown_size) cluster_size = size; break; } if (cluster_off < 0) { // No cluster, die. 
return E_FILE_FORMAT_INVALID; } long long pos_; long len_; status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_); if (status < 0) { // error, or underflow pos = pos_; len = len_; return status; } const long idx = m_clusterCount; if (m_clusterPreloadCount > 0) { if (idx >= m_clusterSize) return E_FILE_FORMAT_INVALID; Cluster* const pCluster = m_clusters[idx]; if (pCluster == NULL || pCluster->m_index >= 0) return E_FILE_FORMAT_INVALID; const long long off = pCluster->GetPosition(); if (off < 0) return E_FILE_FORMAT_INVALID; if (off == cluster_off) { // preloaded already if (status == 0) // no entries found return E_FILE_FORMAT_INVALID; if (cluster_size >= 0) pos += cluster_size; else { const long long element_size = pCluster->GetElementSize(); if (element_size <= 0) return E_FILE_FORMAT_INVALID; // TODO: handle this case pos = pCluster->m_element_start + element_size; } pCluster->m_index = idx; // move from preloaded to loaded ++m_clusterCount; --m_clusterPreloadCount; m_pos = pos; // consume payload if (segment_stop >= 0 && m_pos > segment_stop) return E_FILE_FORMAT_INVALID; return 0; // success } } if (status == 0) { // no entries found if (cluster_size >= 0) pos += cluster_size; if ((total >= 0) && (pos >= total)) { m_pos = total; return 1; // no more clusters } if ((segment_stop >= 0) && (pos >= segment_stop)) { m_pos = segment_stop; return 1; // no more clusters } m_pos = pos; return 2; // try again } Cluster* const pCluster = Cluster::Create(this, idx, cluster_off); if (pCluster == NULL) return -1; if (!AppendCluster(pCluster)) { delete pCluster; return -1; } if (cluster_size >= 0) { pos += cluster_size; m_pos = pos; if (segment_stop > 0 && m_pos > segment_stop) return E_FILE_FORMAT_INVALID; return 0; } m_pUnknownSize = pCluster; m_pos = -pos; return 0; // partial success, since we have a new cluster // status == 0 means "no block entries found" // pos designates start of payload // m_pos has NOT been adjusted yet (in case we need to come back here) }
long Segment::DoLoadCluster(long long& pos, long& len) { if (m_pos < 0) return DoLoadClusterUnknownSize(pos, len); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long cluster_off = -1; // offset relative to start of segment long long cluster_size = -1; // size of cluster payload for (;;) { if ((total >= 0) && (m_pos >= total)) return 1; // no more clusters if ((segment_stop >= 0) && (m_pos >= segment_stop)) return 1; // no more clusters pos = m_pos; if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) // error (or underflow) return static_cast<long>(id); pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element if (size == 0) { // weird m_pos = pos; continue; } const long long unknown_size = (1LL << (7 * len)) - 1; #if 0 // we must handle this to support live webm if (size == unknown_size) return E_FILE_FORMAT_INVALID; //TODO: allow this #endif if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } #if 0 // commented-out, to support incremental cluster parsing len = static_cast<long>(size); if ((pos + size) > avail) return E_BUFFER_NOT_FULL; #endif if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize if (m_pCues == NULL) { const long long element_size = (pos - idpos) + size; m_pCues = new Cues(this, pos, size, idpos, element_size); assert(m_pCues); // TODO } m_pos = pos + size; // consume payload continue; } if (id != 0x0F43B675) { // Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; // TODO: liberalize m_pos = pos + size; // consume payload continue; } cluster_off = idpos - m_start; // relative pos if (size != unknown_size) cluster_size = size; break; } assert(cluster_off >= 0); // have cluster long long pos_; long len_; status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_); if (status < 0) { // error, or underflow pos = pos_; len = len_; return status; } const long idx = m_clusterCount; if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) { // preloaded already if (status == 0) // no entries found return E_FILE_FORMAT_INVALID; if (cluster_size >= 0) pos += cluster_size; else { const long long element_size = pCluster->GetElementSize(); if (element_size <= 0) return E_FILE_FORMAT_INVALID; // TODO: handle this 
case pos = pCluster->m_element_start + element_size; } pCluster->m_index = idx; // move from preloaded to loaded ++m_clusterCount; --m_clusterPreloadCount; m_pos = pos; // consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; // success } } if (status == 0) { // no entries found if (cluster_size < 0) return E_FILE_FORMAT_INVALID; // TODO: handle this pos += cluster_size; if ((total >= 0) && (pos >= total)) { m_pos = total; return 1; // no more clusters } if ((segment_stop >= 0) && (pos >= segment_stop)) { m_pos = segment_stop; return 1; // no more clusters } m_pos = pos; return 2; // try again } Cluster* const pCluster = Cluster::Create(this, idx, cluster_off); assert(pCluster); AppendCluster(pCluster); assert(m_clusters); assert(idx < m_clusterSize); assert(m_clusters[idx] == pCluster); if (cluster_size >= 0) { pos += cluster_size; m_pos = pos; assert((segment_stop < 0) || (m_pos <= segment_stop)); return 0; } m_pUnknownSize = pCluster; m_pos = -pos; return 0; // partial success, since we have a new cluster #if 0 if (cluster_size < 0) { //unknown size const long long payload_pos = pos; //absolute pos of cluster payload for (;;) { //determine cluster size if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; //no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) //error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) //Cluster ID break; if (id == 0x0C53BB6B) //Cues ID break; switch (id) { case 0x20: //BlockGroup case 0x23: //Simple Block case 0x67: //TimeCode case 0x2B: //PrevSize break; default: assert(false); break; } pos += len; //consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume size field of element if (size == 0) //weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; //not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) //weird return E_FILE_FORMAT_INVALID; pos += size; //consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } //determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); pos = payload_pos; //reset and re-parse original cluster } if (m_clusterPreloadCount > 0) { assert(idx < m_clusterSize); Cluster* const pCluster = m_clusters[idx]; assert(pCluster); assert(pCluster->m_index < 0); const long long off = pCluster->GetPosition(); assert(off >= 0); if (off == cluster_off) //preloaded already return E_FILE_FORMAT_INVALID; //subtle } m_pos = pos + cluster_size; //consume payload assert((segment_stop < 0) || (m_pos <= segment_stop)); return 2; //try to find 
another cluster #endif }
C
Android
1
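Editor's note on the final record (CVE-2016-2464, vul=1): the libwebm diff visible in the two function versions illustrates a hardening pattern in which assertions on values derived from untrusted file contents (e.g. assert(cluster_off >= 0), assert(pCluster)) are replaced by explicit E_FILE_FORMAT_INVALID returns, and new allocations are checked via std::nothrow instead of being assumed to succeed. The condensed before/after below is an illustration of that pattern only, not the actual libwebm code; the error-code value is a stand-in.

#include <assert.h>

#define E_FILE_FORMAT_INVALID (-2)   /* illustrative value; the real constant is defined by the parser */

/* Before: parser state derived from the file is only asserted, so a
 * malformed file sails through in release builds where asserts compile out. */
static int load_cluster_before(long long cluster_off)
{
    assert(cluster_off >= 0);        /* no effect with NDEBUG */
    return 0;
}

/* After: the same condition becomes a runtime check that rejects the file. */
static int load_cluster_after(long long cluster_off)
{
    if (cluster_off < 0)
        return E_FILE_FORMAT_INVALID;
    return 0;
}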